summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2015-06-18 14:10:49 +0200
committerOswald Buddenhagen <oswald.buddenhagen@theqtcompany.com>2015-06-18 13:53:24 +0000
commit813fbf95af77a531c57a8c497345ad2c61d475b3 (patch)
tree821b2c8de8365f21b6c9ba17a236fb3006a1d506 /chromium/media
parentaf6588f8d723931a298c995fa97259bb7f7deb55 (diff)
downloadqtwebengine-chromium-813fbf95af77a531c57a8c497345ad2c61d475b3.tar.gz
BASELINE: Update chromium to 44.0.2403.47
Change-Id: Ie056fedba95cf5e5c76b30c4b2c80fca4764aa2f Reviewed-by: Oswald Buddenhagen <oswald.buddenhagen@theqtcompany.com>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/BUILD.gn433
-rw-r--r--chromium/media/DEPS5
-rw-r--r--chromium/media/OWNERS13
-rw-r--r--chromium/media/PRESUBMIT.py2
-rw-r--r--chromium/media/audio/BUILD.gn77
-rw-r--r--chromium/media/audio/OWNERS3
-rw-r--r--chromium/media/audio/agc_audio_stream.h5
-rw-r--r--chromium/media/audio/alsa/alsa_input.cc1
-rw-r--r--chromium/media/audio/alsa/alsa_output.cc99
-rw-r--r--chromium/media/audio/alsa/alsa_output_unittest.cc27
-rw-r--r--chromium/media/audio/alsa/alsa_util.cc1
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.cc1
-rw-r--r--chromium/media/audio/alsa/audio_manager_alsa.cc13
-rw-r--r--chromium/media/audio/android/audio_android_unittest.cc98
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc33
-rw-r--r--chromium/media/audio/android/audio_manager_android.h45
-rw-r--r--chromium/media/audio/android/audio_record_input.cc3
-rw-r--r--chromium/media/audio/android/audio_record_input.h22
-rw-r--r--chromium/media/audio/android/opensles_input.cc5
-rw-r--r--chromium/media/audio/android/opensles_input.h22
-rw-r--r--chromium/media/audio/android/opensles_output.cc8
-rw-r--r--chromium/media/audio/android/opensles_output.h14
-rw-r--r--chromium/media/audio/audio_device_thread.cc11
-rw-r--r--chromium/media/audio/audio_input_controller.cc17
-rw-r--r--chromium/media/audio/audio_input_device.cc9
-rw-r--r--chromium/media/audio/audio_input_unittest.cc41
-rw-r--r--chromium/media/audio/audio_input_volume_unittest.cc23
-rw-r--r--chromium/media/audio/audio_io.h7
-rw-r--r--chromium/media/audio/audio_low_latency_input_output_unittest.cc15
-rw-r--r--chromium/media/audio/audio_manager.cc192
-rw-r--r--chromium/media/audio/audio_manager.h35
-rw-r--r--chromium/media/audio/audio_manager_base.cc9
-rw-r--r--chromium/media/audio/audio_manager_factory.h28
-rw-r--r--chromium/media/audio/audio_manager_factory_unittest.cc58
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc81
-rw-r--r--chromium/media/audio/audio_output_controller.cc6
-rw-r--r--chromium/media/audio/audio_output_controller.h1
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc3
-rw-r--r--chromium/media/audio/audio_output_device.cc30
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc5
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h3
-rw-r--r--chromium/media/audio/audio_output_proxy_unittest.cc53
-rw-r--r--chromium/media/audio/audio_output_resampler.cc38
-rw-r--r--chromium/media/audio/audio_output_resampler.h28
-rw-r--r--chromium/media/audio/audio_parameters.cc9
-rw-r--r--chromium/media/audio/audio_parameters.h1
-rw-r--r--chromium/media/audio/audio_parameters_unittest.cc21
-rw-r--r--chromium/media/audio/audio_power_monitor.cc3
-rw-r--r--chromium/media/audio/audio_power_monitor_unittest.cc7
-rw-r--r--chromium/media/audio/audio_unittest_util.cc29
-rw-r--r--chromium/media/audio/audio_unittest_util.h34
-rw-r--r--chromium/media/audio/clockless_audio_sink.cc6
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.h34
-rw-r--r--chromium/media/audio/cras/cras_input.cc10
-rw-r--r--chromium/media/audio/cras/cras_input.h21
-rw-r--r--chromium/media/audio/cras/cras_input_unittest.cc2
-rw-r--r--chromium/media/audio/cras/cras_unified.h14
-rw-r--r--chromium/media/audio/cras/cras_unified_unittest.cc16
-rw-r--r--chromium/media/audio/fake_audio_input_stream.cc200
-rw-r--r--chromium/media/audio/fake_audio_input_stream.h51
-rw-r--r--chromium/media/audio/fake_audio_output_stream.cc12
-rw-r--r--chromium/media/audio/fake_audio_output_stream.h7
-rw-r--r--chromium/media/audio/fake_audio_worker.cc (renamed from chromium/media/audio/fake_audio_consumer.cc)83
-rw-r--r--chromium/media/audio/fake_audio_worker.h (renamed from chromium/media/audio/fake_audio_consumer.h)33
-rw-r--r--chromium/media/audio/fake_audio_worker_unittest.cc (renamed from chromium/media/audio/fake_audio_consumer_unittest.cc)69
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.cc2
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.cc17
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac_unittest.cc19
-rw-r--r--chromium/media/audio/mac/audio_device_listener_mac.cc1
-rw-r--r--chromium/media/audio/mac/audio_input_mac.cc3
-rw-r--r--chromium/media/audio/mac/audio_input_mac.h2
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.cc26
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.h7
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc38
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc2
-rw-r--r--chromium/media/audio/null_audio_sink.cc19
-rw-r--r--chromium/media/audio/null_audio_sink.h7
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.h18
-rw-r--r--chromium/media/audio/pulse/pulse.sigs4
-rw-r--r--chromium/media/audio/pulse/pulse_input.cc6
-rw-r--r--chromium/media/audio/pulse/pulse_input.h3
-rw-r--r--chromium/media/audio/pulse/pulse_output.cc1
-rw-r--r--chromium/media/audio/pulse/pulse_util.cc39
-rw-r--r--chromium/media/audio/simple_sources.cc238
-rw-r--r--chromium/media/audio/simple_sources.h55
-rw-r--r--chromium/media/audio/sounds/audio_stream_handler.cc19
-rw-r--r--chromium/media/audio/sounds/sounds_manager.cc2
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler.cc30
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler.h19
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler_unittest.cc12
-rw-r--r--chromium/media/audio/virtual_audio_input_stream.cc23
-rw-r--r--chromium/media/audio/virtual_audio_input_stream.h10
-rw-r--r--chromium/media/audio/virtual_audio_input_stream_unittest.cc12
-rw-r--r--chromium/media/audio/virtual_audio_output_stream.cc7
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.cc20
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.h20
-rw-r--r--chromium/media/audio/win/audio_device_listener_win_unittest.cc11
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc30
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.h20
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win_unittest.cc73
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc81
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h16
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc112
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc17
-rw-r--r--chromium/media/audio/win/audio_manager_win.h38
-rw-r--r--chromium/media/audio/win/audio_output_win_unittest.cc84
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc186
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc225
-rw-r--r--chromium/media/audio/win/wavein_input_win.cc3
-rw-r--r--chromium/media/audio/win/wavein_input_win.h22
-rw-r--r--chromium/media/audio/win/waveout_output_win.cc4
-rw-r--r--chromium/media/audio/win/waveout_output_win.h14
-rw-r--r--chromium/media/audio_unittests.isolate70
-rw-r--r--chromium/media/base/BUILD.gn177
-rw-r--r--chromium/media/base/android/BUILD.gn21
-rw-r--r--chromium/media/base/android/audio_decoder_job.cc66
-rw-r--r--chromium/media/base/android/audio_decoder_job.h30
-rw-r--r--chromium/media/base/android/browser_cdm_factory_android.cc53
-rw-r--r--chromium/media/base/android/browser_cdm_factory_android.h34
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.cc4
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.h7
-rw-r--r--chromium/media/base/android/media_client_android.cc37
-rw-r--r--chromium/media/base/android/media_client_android.h59
-rw-r--r--chromium/media/base/android/media_codec_bridge.cc74
-rw-r--r--chromium/media/base/android/media_codec_bridge.h20
-rw-r--r--chromium/media/base/android/media_codec_bridge_unittest.cc34
-rw-r--r--chromium/media/base/android/media_codec_player.cc252
-rw-r--r--chromium/media/base/android/media_codec_player.h85
-rw-r--r--chromium/media/base/android/media_decoder_job.cc85
-rw-r--r--chromium/media/base/android/media_decoder_job.h56
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc560
-rw-r--r--chromium/media/base/android/media_drm_bridge.h124
-rw-r--r--chromium/media/base/android/media_drm_bridge_delegate.cc27
-rw-r--r--chromium/media/base/android/media_drm_bridge_delegate.h46
-rw-r--r--chromium/media/base/android/media_drm_bridge_unittest.cc79
-rw-r--r--chromium/media/base/android/media_player_android.cc24
-rw-r--r--chromium/media/base/android/media_player_android.h13
-rw-r--r--chromium/media/base/android/media_player_bridge.cc42
-rw-r--r--chromium/media/base/android/media_player_bridge.h46
-rw-r--r--chromium/media/base/android/media_player_listener.cc4
-rw-r--r--chromium/media/base/android/media_player_manager.h13
-rw-r--r--chromium/media/base/android/media_source_player.cc111
-rw-r--r--chromium/media/base/android/media_source_player.h49
-rw-r--r--chromium/media/base/android/media_source_player_unittest.cc259
-rw-r--r--chromium/media/base/android/video_decoder_job.cc35
-rw-r--r--chromium/media/base/android/video_decoder_job.h33
-rw-r--r--chromium/media/base/android/webaudio_media_codec_bridge.cc11
-rw-r--r--chromium/media/base/audio_buffer.cc24
-rw-r--r--chromium/media/base/audio_buffer_unittest.cc10
-rw-r--r--chromium/media/base/audio_bus.cc8
-rw-r--r--chromium/media/base/audio_bus_perftest.cc8
-rw-r--r--chromium/media/base/audio_converter.cc3
-rw-r--r--chromium/media/base/audio_converter_perftest.cc4
-rw-r--r--chromium/media/base/audio_converter_unittest.cc17
-rw-r--r--chromium/media/base/audio_decoder_config.cc40
-rw-r--r--chromium/media/base/audio_decoder_config.h5
-rw-r--r--chromium/media/base/audio_discard_helper.cc1
-rw-r--r--chromium/media/base/audio_renderer.h27
-rw-r--r--chromium/media/base/audio_shifter.cc287
-rw-r--r--chromium/media/base/audio_shifter.h139
-rw-r--r--chromium/media/base/audio_shifter_unittest.cc209
-rw-r--r--chromium/media/base/audio_splicer_unittest.cc2
-rw-r--r--chromium/media/base/bind_to_current_loop.h118
-rw-r--r--chromium/media/base/bind_to_current_loop.h.pump86
-rw-r--r--chromium/media/base/browser_cdm.h39
-rw-r--r--chromium/media/base/browser_cdm_factory.cc47
-rw-r--r--chromium/media/base/browser_cdm_factory.h38
-rw-r--r--chromium/media/base/cdm_callback_promise.cc1
-rw-r--r--chromium/media/base/cdm_config.h29
-rw-r--r--chromium/media/base/cdm_context.cc16
-rw-r--r--chromium/media/base/cdm_context.h53
-rw-r--r--chromium/media/base/cdm_factory.h18
-rw-r--r--chromium/media/base/cdm_key_information.cc16
-rw-r--r--chromium/media/base/cdm_key_information.h37
-rw-r--r--chromium/media/base/cdm_promise.h12
-rw-r--r--chromium/media/base/cdm_promise_adapter.cc78
-rw-r--r--chromium/media/base/cdm_promise_adapter.h59
-rw-r--r--chromium/media/base/channel_layout.h18
-rw-r--r--chromium/media/base/channel_mixer_unittest.cc6
-rw-r--r--chromium/media/base/container_names.cc3
-rw-r--r--chromium/media/base/data_buffer.cc1
-rw-r--r--chromium/media/base/decoder_buffer.cc60
-rw-r--r--chromium/media/base/decoder_buffer.h23
-rw-r--r--chromium/media/base/decoder_buffer_unittest.cc34
-rw-r--r--chromium/media/base/decrypt_config.cc16
-rw-r--r--chromium/media/base/decrypt_config.h6
-rw-r--r--chromium/media/base/decryptor.h8
-rw-r--r--chromium/media/base/demuxer.h9
-rw-r--r--chromium/media/base/demuxer_perftest.cc17
-rw-r--r--chromium/media/base/demuxer_stream.cc6
-rw-r--r--chromium/media/base/demuxer_stream.h20
-rw-r--r--chromium/media/base/demuxer_stream_provider.h9
-rw-r--r--chromium/media/base/eme_constants.h123
-rw-r--r--chromium/media/base/fake_audio_render_callback.cc3
-rw-r--r--chromium/media/base/fake_audio_render_callback.h16
-rw-r--r--chromium/media/base/fake_demuxer_stream.cc (renamed from chromium/media/filters/fake_demuxer_stream.cc)31
-rw-r--r--chromium/media/base/fake_demuxer_stream.h (renamed from chromium/media/filters/fake_demuxer_stream.h)26
-rw-r--r--chromium/media/base/fake_demuxer_stream_unittest.cc (renamed from chromium/media/filters/fake_demuxer_stream_unittest.cc)9
-rw-r--r--chromium/media/base/fake_text_track_stream.cc9
-rw-r--r--chromium/media/base/fake_text_track_stream.h10
-rw-r--r--chromium/media/base/key_system_info.cc6
-rw-r--r--chromium/media/base/key_system_info.h23
-rw-r--r--chromium/media/base/key_systems.cc940
-rw-r--r--chromium/media/base/key_systems.h134
-rw-r--r--chromium/media/base/key_systems_support_uma.cc134
-rw-r--r--chromium/media/base/key_systems_support_uma.h57
-rw-r--r--chromium/media/base/key_systems_unittest.cc849
-rw-r--r--chromium/media/base/limits.h5
-rw-r--r--chromium/media/base/mac/BUILD.gn18
-rw-r--r--chromium/media/base/mac/avfoundation_glue.h14
-rw-r--r--chromium/media/base/mac/avfoundation_glue.mm91
-rw-r--r--chromium/media/base/mac/corevideo_glue.h3
-rw-r--r--chromium/media/base/mac/video_frame_mac.cc122
-rw-r--r--chromium/media/base/mac/video_frame_mac.h30
-rw-r--r--chromium/media/base/mac/video_frame_mac_unittests.cc133
-rw-r--r--chromium/media/base/mac/videotoolbox_glue.h4
-rw-r--r--chromium/media/base/mac/videotoolbox_glue.mm15
-rw-r--r--chromium/media/base/media_client.cc39
-rw-r--r--chromium/media/base/media_client.h72
-rw-r--r--chromium/media/base/media_keys.cc4
-rw-r--r--chromium/media/base/media_keys.h147
-rw-r--r--chromium/media/base/media_log.cc107
-rw-r--r--chromium/media/base/media_log.h89
-rw-r--r--chromium/media/base/media_log_event.h12
-rw-r--r--chromium/media/base/media_permission.cc15
-rw-r--r--chromium/media/base/media_permission.h49
-rw-r--r--chromium/media/base/media_switches.cc39
-rw-r--r--chromium/media/base/media_switches.h14
-rw-r--r--chromium/media/base/media_win.cc21
-rw-r--r--chromium/media/base/mock_audio_renderer_sink.h6
-rw-r--r--chromium/media/base/mock_filters.cc14
-rw-r--r--chromium/media/base/mock_filters.h76
-rw-r--r--chromium/media/base/moving_average.cc41
-rw-r--r--chromium/media/base/moving_average.h49
-rw-r--r--chromium/media/base/moving_average_unittest.cc36
-rw-r--r--chromium/media/base/multi_channel_resampler.cc7
-rw-r--r--chromium/media/base/multi_channel_resampler.h3
-rw-r--r--chromium/media/base/null_video_sink.cc95
-rw-r--r--chromium/media/base/null_video_sink.h95
-rw-r--r--chromium/media/base/null_video_sink_unittest.cc149
-rw-r--r--chromium/media/base/pipeline.cc97
-rw-r--r--chromium/media/base/pipeline.h53
-rw-r--r--chromium/media/base/pipeline_unittest.cc69
-rw-r--r--chromium/media/base/renderer.h43
-rw-r--r--chromium/media/base/renderer_factory.cc15
-rw-r--r--chromium/media/base/renderer_factory.h45
-rw-r--r--chromium/media/base/sample_format.cc3
-rw-r--r--chromium/media/base/sample_format.h3
-rw-r--r--chromium/media/base/scoped_histogram_timer.h32
-rw-r--r--chromium/media/base/scoped_histogram_timer_unittest.cc16
-rw-r--r--chromium/media/base/serial_runner.cc4
-rw-r--r--chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc7
-rw-r--r--chromium/media/base/simd/convert_yuv_to_rgb.h80
-rw-r--r--chromium/media/base/simd/convert_yuv_to_rgb_c.cc65
-rw-r--r--chromium/media/base/simd/convert_yuv_to_rgb_sse.asm2
-rw-r--r--chromium/media/base/simd/convert_yuv_to_rgb_x86.cc1
-rw-r--r--chromium/media/base/simd/convert_yuva_to_argb_mmx.asm2
-rw-r--r--chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm2
-rw-r--r--chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm1
-rw-r--r--chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm2
-rw-r--r--chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc2
-rw-r--r--chromium/media/base/simd/scale_yuv_to_rgb_sse.asm2
-rw-r--r--chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm1
-rw-r--r--chromium/media/base/simd/yuv_to_rgb_table.cc669
-rw-r--r--chromium/media/base/simd/yuv_to_rgb_table.h27
-rw-r--r--chromium/media/base/sinc_resampler.cc22
-rw-r--r--chromium/media/base/sinc_resampler.h25
-rw-r--r--chromium/media/base/sinc_resampler_perftest.cc4
-rw-r--r--chromium/media/base/sinc_resampler_unittest.cc51
-rw-r--r--chromium/media/base/stream_parser.cc2
-rw-r--r--chromium/media/base/stream_parser.h31
-rw-r--r--chromium/media/base/stream_parser_buffer.cc35
-rw-r--r--chromium/media/base/stream_parser_buffer.h15
-rw-r--r--chromium/media/base/test_helpers.cc21
-rw-r--r--chromium/media/base/test_helpers.h12
-rw-r--r--chromium/media/base/text_renderer.cc1
-rw-r--r--chromium/media/base/time_delta_interpolator.cc4
-rw-r--r--chromium/media/base/time_delta_interpolator.h4
-rw-r--r--chromium/media/base/time_delta_interpolator_unittest.cc14
-rw-r--r--chromium/media/base/time_source.h46
-rw-r--r--chromium/media/base/user_input_monitor.cc1
-rw-r--r--chromium/media/base/user_input_monitor_linux.cc3
-rw-r--r--chromium/media/base/user_input_monitor_win.cc18
-rw-r--r--chromium/media/base/vector_math_perftest.cc8
-rw-r--r--chromium/media/base/video_capture_types.cc (renamed from chromium/media/video/capture/video_capture_types.cc)58
-rw-r--r--chromium/media/base/video_capture_types.h (renamed from chromium/media/video/capture/video_capture_types.h)66
-rw-r--r--chromium/media/base/video_capturer_source.cc11
-rw-r--r--chromium/media/base/video_capturer_source.h90
-rw-r--r--chromium/media/base/video_decoder.h13
-rw-r--r--chromium/media/base/video_decoder_config.cc26
-rw-r--r--chromium/media/base/video_decoder_config.h6
-rw-r--r--chromium/media/base/video_frame.cc312
-rw-r--r--chromium/media/base/video_frame.h159
-rw-r--r--chromium/media/base/video_frame_metadata.cc125
-rw-r--r--chromium/media/base/video_frame_metadata.h70
-rw-r--r--chromium/media/base/video_frame_unittest.cc140
-rw-r--r--chromium/media/base/video_renderer.h44
-rw-r--r--chromium/media/base/video_renderer_sink.h73
-rw-r--r--chromium/media/base/video_util.cc49
-rw-r--r--chromium/media/base/video_util.h27
-rw-r--r--chromium/media/base/video_util_unittest.cc44
-rw-r--r--chromium/media/base/wall_clock_time_source.cc32
-rw-r--r--chromium/media/base/wall_clock_time_source.h24
-rw-r--r--chromium/media/base/wall_clock_time_source_unittest.cc41
-rw-r--r--chromium/media/base/yuv_convert.cc176
-rw-r--r--chromium/media/base/yuv_convert.h10
-rw-r--r--chromium/media/base/yuv_convert_perftest.cc15
-rw-r--r--chromium/media/base/yuv_convert_unittest.cc5
-rw-r--r--chromium/media/blink/BUILD.gn96
-rw-r--r--chromium/media/blink/DEPS6
-rw-r--r--chromium/media/blink/buffered_data_source.cc40
-rw-r--r--chromium/media/blink/buffered_data_source.h19
-rw-r--r--chromium/media/blink/buffered_data_source_unittest.cc87
-rw-r--r--chromium/media/blink/buffered_resource_loader.cc30
-rw-r--r--chromium/media/blink/buffered_resource_loader.h15
-rw-r--r--chromium/media/blink/buffered_resource_loader_unittest.cc30
-rw-r--r--chromium/media/blink/cdm_result_promise.h19
-rw-r--r--chromium/media/blink/cdm_session_adapter.cc198
-rw-r--r--chromium/media/blink/cdm_session_adapter.h152
-rw-r--r--chromium/media/blink/encrypted_media_player_support.cc315
-rw-r--r--chromium/media/blink/encrypted_media_player_support.h110
-rw-r--r--chromium/media/blink/key_system_config_selector.cc782
-rw-r--r--chromium/media/blink/key_system_config_selector.h97
-rw-r--r--chromium/media/blink/key_system_config_selector_unittest.cc767
-rw-r--r--chromium/media/blink/media_blink.gyp20
-rw-r--r--chromium/media/blink/new_session_cdm_result_promise.cc4
-rw-r--r--chromium/media/blink/new_session_cdm_result_promise.h4
-rw-r--r--chromium/media/blink/null_encrypted_media_player_support.cc86
-rw-r--r--chromium/media/blink/null_encrypted_media_player_support.h76
-rw-r--r--chromium/media/blink/run_all_unittests.cc19
-rw-r--r--chromium/media/blink/skcanvas_video_renderer.cc (renamed from chromium/media/filters/skcanvas_video_renderer.cc)515
-rw-r--r--chromium/media/blink/skcanvas_video_renderer.h (renamed from chromium/media/filters/skcanvas_video_renderer.h)39
-rw-r--r--chromium/media/blink/skcanvas_video_renderer_unittest.cc (renamed from chromium/media/filters/skcanvas_video_renderer_unittest.cc)15
-rw-r--r--chromium/media/blink/test_response_generator.cc1
-rw-r--r--chromium/media/blink/video_frame_compositor.cc202
-rw-r--r--chromium/media/blink/video_frame_compositor.h139
-rw-r--r--chromium/media/blink/video_frame_compositor_unittest.cc320
-rw-r--r--chromium/media/blink/webcontentdecryptionmodule_impl.cc104
-rw-r--r--chromium/media/blink/webcontentdecryptionmodule_impl.h73
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc77
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h57
-rw-r--r--chromium/media/blink/webcontentdecryptionmodulesession_impl.cc417
-rw-r--r--chromium/media/blink/webcontentdecryptionmodulesession_impl.h87
-rw-r--r--chromium/media/blink/webencryptedmediaclient_impl.cc151
-rw-r--r--chromium/media/blink/webencryptedmediaclient_impl.h86
-rw-r--r--chromium/media/blink/webinbandtexttrack_impl.cc10
-rw-r--r--chromium/media/blink/webinbandtexttrack_impl.h6
-rw-r--r--chromium/media/blink/webmediaplayer_impl.cc470
-rw-r--r--chromium/media/blink/webmediaplayer_impl.h70
-rw-r--r--chromium/media/blink/webmediaplayer_params.cc24
-rw-r--r--chromium/media/blink/webmediaplayer_params.h61
-rw-r--r--chromium/media/blink/webmediaplayer_util.cc46
-rw-r--r--chromium/media/blink/webmediaplayer_util.h8
-rw-r--r--chromium/media/blink/webmediasource_impl.cc16
-rw-r--r--chromium/media/blink/websourcebuffer_impl.cc4
-rw-r--r--chromium/media/cast/BUILD.gn161
-rw-r--r--chromium/media/cast/OWNERS3
-rw-r--r--chromium/media/cast/cast.gyp66
-rw-r--r--chromium/media/cast/cast_config.cc17
-rw-r--r--chromium/media/cast/cast_config.h43
-rw-r--r--chromium/media/cast/cast_defines.h41
-rw-r--r--chromium/media/cast/cast_environment.h2
-rw-r--r--chromium/media/cast/cast_receiver.h7
-rw-r--r--chromium/media/cast/cast_sender.h31
-rw-r--r--chromium/media/cast/cast_sender_impl.cc81
-rw-r--r--chromium/media/cast/cast_sender_impl.h25
-rw-r--r--chromium/media/cast/cast_testing.gypi69
-rw-r--r--chromium/media/cast/cast_unittests.isolate84
-rw-r--r--chromium/media/cast/logging/encoding_event_subscriber.h6
-rw-r--r--chromium/media/cast/logging/log_deserializer.cc1
-rw-r--r--chromium/media/cast/logging/logging_impl.cc2
-rw-r--r--chromium/media/cast/logging/receiver_time_offset_estimator_impl.h8
-rw-r--r--chromium/media/cast/logging/simple_event_subscriber.cc1
-rw-r--r--chromium/media/cast/logging/simple_event_subscriber.h6
-rw-r--r--chromium/media/cast/logging/stats_event_subscriber.cc17
-rw-r--r--chromium/media/cast/logging/stats_event_subscriber.h8
-rw-r--r--chromium/media/cast/logging/stats_event_subscriber_unittest.cc16
-rw-r--r--chromium/media/cast/net/cast_transport_config.h2
-rw-r--r--chromium/media/cast/net/cast_transport_sender.h21
-rw-r--r--chromium/media/cast/net/cast_transport_sender_impl.cc95
-rw-r--r--chromium/media/cast/net/cast_transport_sender_impl.h46
-rw-r--r--chromium/media/cast/net/cast_transport_sender_impl_unittest.cc10
-rw-r--r--chromium/media/cast/net/mock_cast_transport_sender.cc15
-rw-r--r--chromium/media/cast/net/mock_cast_transport_sender.h51
-rw-r--r--chromium/media/cast/net/pacing/paced_sender.h10
-rw-r--r--chromium/media/cast/net/pacing/paced_sender_unittest.cc4
-rw-r--r--chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc67
-rw-r--r--chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h41
-rw-r--r--chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc14
-rw-r--r--chromium/media/cast/net/rtcp/rtcp.cc46
-rw-r--r--chromium/media/cast/net/rtcp/rtcp.h36
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_builder.cc70
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_builder.h28
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc83
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_defines.cc12
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_defines.h31
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_unittest.cc102
-rw-r--r--chromium/media/cast/net/rtp/cast_message_builder_unittest.cc2
-rw-r--r--chromium/media/cast/net/rtp/receiver_stats.cc26
-rw-r--r--chromium/media/cast/net/rtp/receiver_stats.h8
-rw-r--r--chromium/media/cast/net/rtp/receiver_stats_unittest.cc82
-rw-r--r--chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc4
-rw-r--r--chromium/media/cast/net/rtp/rtp_parser.cc9
-rw-r--r--chromium/media/cast/net/rtp/rtp_parser.h2
-rw-r--r--chromium/media/cast/net/rtp/rtp_parser_unittest.cc2
-rw-r--r--chromium/media/cast/net/rtp/rtp_sender.cc6
-rw-r--r--chromium/media/cast/net/rtp/rtp_sender.h3
-rw-r--r--chromium/media/cast/net/udp_transport.cc44
-rw-r--r--chromium/media/cast/net/udp_transport.h16
-rw-r--r--chromium/media/cast/net/udp_transport_unittest.cc5
-rw-r--r--chromium/media/cast/receiver/audio_decoder.cc47
-rw-r--r--chromium/media/cast/receiver/audio_decoder.h8
-rw-r--r--chromium/media/cast/receiver/audio_decoder_unittest.cc6
-rw-r--r--chromium/media/cast/receiver/cast_receiver_impl.cc36
-rw-r--r--chromium/media/cast/receiver/cast_receiver_impl.h19
-rw-r--r--chromium/media/cast/receiver/frame_receiver.cc63
-rw-r--r--chromium/media/cast/receiver/frame_receiver.h16
-rw-r--r--chromium/media/cast/receiver/frame_receiver_unittest.cc36
-rw-r--r--chromium/media/cast/receiver/video_decoder.cc46
-rw-r--r--chromium/media/cast/receiver/video_decoder.h8
-rw-r--r--chromium/media/cast/receiver/video_decoder_unittest.cc69
-rw-r--r--chromium/media/cast/sender/audio_encoder.cc62
-rw-r--r--chromium/media/cast/sender/audio_encoder.h2
-rw-r--r--chromium/media/cast/sender/audio_encoder_unittest.cc8
-rw-r--r--chromium/media/cast/sender/audio_sender.cc24
-rw-r--r--chromium/media/cast/sender/audio_sender.h16
-rw-r--r--chromium/media/cast/sender/audio_sender_unittest.cc24
-rw-r--r--chromium/media/cast/sender/congestion_control.cc24
-rw-r--r--chromium/media/cast/sender/external_video_encoder.cc475
-rw-r--r--chromium/media/cast/sender/external_video_encoder.h98
-rw-r--r--chromium/media/cast/sender/external_video_encoder_unittest.cc261
-rw-r--r--chromium/media/cast/sender/fake_software_video_encoder.cc5
-rw-r--r--chromium/media/cast/sender/fake_software_video_encoder.h14
-rw-r--r--chromium/media/cast/sender/fake_video_encode_accelerator_factory.cc81
-rw-r--r--chromium/media/cast/sender/fake_video_encode_accelerator_factory.h90
-rw-r--r--chromium/media/cast/sender/frame_sender.cc13
-rw-r--r--chromium/media/cast/sender/frame_sender.h7
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder.cc763
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder.h133
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder_unittest.cc413
-rw-r--r--chromium/media/cast/sender/size_adaptable_video_encoder_base.cc168
-rw-r--r--chromium/media/cast/sender/size_adaptable_video_encoder_base.h120
-rw-r--r--chromium/media/cast/sender/video_encoder.cc66
-rw-r--r--chromium/media/cast/sender/video_encoder.h31
-rw-r--r--chromium/media/cast/sender/video_encoder_impl.cc25
-rw-r--r--chromium/media/cast/sender/video_encoder_impl.h16
-rw-r--r--chromium/media/cast/sender/video_encoder_impl_unittest.cc217
-rw-r--r--chromium/media/cast/sender/video_encoder_unittest.cc457
-rw-r--r--chromium/media/cast/sender/video_frame_factory.h51
-rw-r--r--chromium/media/cast/sender/video_sender.cc131
-rw-r--r--chromium/media/cast/sender/video_sender.h22
-rw-r--r--chromium/media/cast/sender/video_sender_unittest.cc223
-rw-r--r--chromium/media/cast/sender/vp8_encoder.cc119
-rw-r--r--chromium/media/cast/sender/vp8_encoder.h30
-rw-r--r--chromium/media/cdm/aes_decryptor.cc229
-rw-r--r--chromium/media/cdm/aes_decryptor.h61
-rw-r--r--chromium/media/cdm/aes_decryptor_unittest.cc322
-rw-r--r--chromium/media/cdm/cenc_utils.cc180
-rw-r--r--chromium/media/cdm/cenc_utils.h33
-rw-r--r--chromium/media/cdm/cenc_utils_unittest.cc376
-rw-r--r--chromium/media/cdm/default_cdm_factory.cc53
-rw-r--r--chromium/media/cdm/default_cdm_factory.h37
-rw-r--r--chromium/media/cdm/json_web_key.cc256
-rw-r--r--chromium/media/cdm/json_web_key.h29
-rw-r--r--chromium/media/cdm/json_web_key_unittest.cc413
-rw-r--r--chromium/media/cdm/key_system_names.cc9
-rw-r--r--chromium/media/cdm/key_system_names.h8
-rw-r--r--chromium/media/cdm/ppapi/BUILD.gn76
-rw-r--r--chromium/media/cdm/ppapi/api/content_decryption_module.h588
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.cc519
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.gni129
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.h210
-rw-r--r--chromium/media/cdm/ppapi/cdm_file_io_impl.cc1
-rw-r--r--chromium/media/cdm/ppapi/cdm_file_io_impl.h8
-rw-r--r--chromium/media/cdm/ppapi/cdm_file_io_test.h12
-rw-r--r--chromium/media/cdm/ppapi/cdm_helpers.cc12
-rw-r--r--chromium/media/cdm/ppapi/cdm_helpers.h63
-rw-r--r--chromium/media/cdm/ppapi/cdm_logging.cc15
-rw-r--r--chromium/media/cdm/ppapi/cdm_logging.h24
-rw-r--r--chromium/media/cdm/ppapi/cdm_wrapper.h195
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc418
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h138
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h2
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h18
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc1
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h18
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h18
-rw-r--r--chromium/media/cdm/ppapi/supported_cdm_versions.h20
-rw-r--r--chromium/media/cdm/proxy_decryptor.cc402
-rw-r--r--chromium/media/cdm/proxy_decryptor.h160
-rw-r--r--chromium/media/cdm/stub/stub_cdm.cc162
-rw-r--r--chromium/media/cdm/stub/stub_cdm.h80
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc30
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.h1
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common_unittest.cc12
-rw-r--r--chromium/media/ffmpeg/ffmpeg_regression_tests.cc515
-rw-r--r--chromium/media/ffmpeg/ffmpeg_unittest.cc589
-rw-r--r--chromium/media/filters/audio_clock.cc19
-rw-r--r--chromium/media/filters/audio_clock.h15
-rw-r--r--chromium/media/filters/audio_clock_unittest.cc56
-rw-r--r--chromium/media/filters/audio_decoder_selector_unittest.cc25
-rw-r--r--chromium/media/filters/audio_decoder_unittest.cc2
-rw-r--r--chromium/media/filters/audio_file_reader.cc15
-rw-r--r--chromium/media/filters/audio_file_reader.h1
-rw-r--r--chromium/media/filters/audio_file_reader_unittest.cc4
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.cc31
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.h13
-rw-r--r--chromium/media/filters/audio_renderer_algorithm_unittest.cc60
-rw-r--r--chromium/media/filters/chunk_demuxer.cc201
-rw-r--r--chromium/media/filters/chunk_demuxer.h31
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc137
-rw-r--r--chromium/media/filters/clockless_video_frame_scheduler.cc34
-rw-r--r--chromium/media/filters/clockless_video_frame_scheduler.h34
-rw-r--r--chromium/media/filters/context_3d.h36
-rw-r--r--chromium/media/filters/decoder_selector.cc37
-rw-r--r--chromium/media/filters/decoder_selector.h25
-rw-r--r--chromium/media/filters/decoder_stream.cc150
-rw-r--r--chromium/media/filters/decoder_stream.h19
-rw-r--r--chromium/media/filters/decoder_stream_traits.cc30
-rw-r--r--chromium/media/filters/decoder_stream_traits.h22
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.cc18
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.h18
-rw-r--r--chromium/media/filters/decrypting_audio_decoder_unittest.cc21
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.cc28
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.h10
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream_unittest.cc9
-rw-r--r--chromium/media/filters/decrypting_video_decoder.cc16
-rw-r--r--chromium/media/filters/decrypting_video_decoder.h9
-rw-r--r--chromium/media/filters/decrypting_video_decoder_unittest.cc10
-rw-r--r--chromium/media/filters/default_media_permission.cc49
-rw-r--r--chromium/media/filters/default_media_permission.h39
-rw-r--r--chromium/media/filters/fake_video_decoder.cc24
-rw-r--r--chromium/media/filters/fake_video_decoder.h14
-rw-r--r--chromium/media/filters/fake_video_decoder_unittest.cc48
-rw-r--r--chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc2
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc15
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.cc86
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.h23
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc184
-rw-r--r--chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc2
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.cc13
-rw-r--r--chromium/media/filters/frame_processor.cc6
-rw-r--r--chromium/media/filters/frame_processor_unittest.cc15
-rw-r--r--chromium/media/filters/gpu_video_accelerator_factories.cc11
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc149
-rw-r--r--chromium/media/filters/gpu_video_decoder.h16
-rw-r--r--chromium/media/filters/h264_bitstream_buffer.h4
-rw-r--r--chromium/media/filters/h264_parser.cc15
-rw-r--r--chromium/media/filters/h264_parser.h8
-rw-r--r--chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc2
-rw-r--r--chromium/media/filters/jpeg_parser.cc395
-rw-r--r--chromium/media/filters/jpeg_parser.h83
-rw-r--r--chromium/media/filters/jpeg_parser_unittest.cc112
-rw-r--r--chromium/media/filters/mock_gpu_video_accelerator_factories.cc23
-rw-r--r--chromium/media/filters/pipeline_integration_perftest.cc89
-rw-r--r--chromium/media/filters/pipeline_integration_test.cc1615
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.cc357
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.h151
-rw-r--r--chromium/media/filters/source_buffer_platform_lowmem.cc14
-rw-r--r--chromium/media/filters/source_buffer_range.cc53
-rw-r--r--chromium/media/filters/source_buffer_range.h23
-rw-r--r--chromium/media/filters/source_buffer_stream.cc243
-rw-r--r--chromium/media/filters/source_buffer_stream.h25
-rw-r--r--chromium/media/filters/source_buffer_stream_unittest.cc240
-rw-r--r--chromium/media/filters/stream_parser_factory.cc56
-rw-r--r--chromium/media/filters/test_video_frame_scheduler.cc66
-rw-r--r--chromium/media/filters/test_video_frame_scheduler.h57
-rw-r--r--chromium/media/filters/video_cadence_estimator.cc261
-rw-r--r--chromium/media/filters/video_cadence_estimator.h199
-rw-r--r--chromium/media/filters/video_cadence_estimator_unittest.cc204
-rw-r--r--chromium/media/filters/video_decoder_selector_unittest.cc24
-rw-r--r--chromium/media/filters/video_frame_scheduler.h48
-rw-r--r--chromium/media/filters/video_frame_scheduler_impl.cc105
-rw-r--r--chromium/media/filters/video_frame_scheduler_impl.h74
-rw-r--r--chromium/media/filters/video_frame_scheduler_impl_unittest.cc150
-rw-r--r--chromium/media/filters/video_frame_scheduler_proxy.cc48
-rw-r--r--chromium/media/filters/video_frame_scheduler_proxy.h51
-rw-r--r--chromium/media/filters/video_frame_scheduler_unittest.cc80
-rw-r--r--chromium/media/filters/video_frame_stream_unittest.cc180
-rw-r--r--chromium/media/filters/video_renderer_algorithm.cc689
-rw-r--r--chromium/media/filters/video_renderer_algorithm.h312
-rw-r--r--chromium/media/filters/video_renderer_algorithm_unittest.cc1284
-rw-r--r--chromium/media/filters/video_renderer_impl.cc450
-rw-r--r--chromium/media/filters/vp8_bool_decoder.cc206
-rw-r--r--chromium/media/filters/vp8_bool_decoder.h133
-rw-r--r--chromium/media/filters/vp8_bool_decoder_unittest.cc118
-rw-r--r--chromium/media/filters/vp8_parser.cc872
-rw-r--r--chromium/media/filters/vp8_parser.h195
-rw-r--r--chromium/media/filters/vp8_parser_unittest.cc59
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc85
-rw-r--r--chromium/media/formats/common/offset_byte_queue.cc1
-rw-r--r--chromium/media/formats/common/stream_parser_test_base.cc11
-rw-r--r--chromium/media/formats/common/stream_parser_test_base.h5
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video.cc7
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video_unittest.cc3
-rw-r--r--chromium/media/formats/mp2t/es_parser_adts.cc1
-rw-r--r--chromium/media/formats/mp2t/es_parser_adts.h8
-rw-r--r--chromium/media/formats/mp2t/es_parser_adts_unittest.cc1
-rw-r--r--chromium/media/formats/mp2t/es_parser_h264.cc6
-rw-r--r--chromium/media/formats/mp2t/es_parser_h264.h8
-rw-r--r--chromium/media/formats/mp2t/es_parser_h264_unittest.cc1
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio.cc1
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio.h8
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc1
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser.cc18
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser.h25
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc9
-rw-r--r--chromium/media/formats/mp2t/ts_section_pat.h6
-rw-r--r--chromium/media/formats/mp2t/ts_section_pes.h8
-rw-r--r--chromium/media/formats/mp2t/ts_section_pmt.h6
-rw-r--r--chromium/media/formats/mp2t/ts_section_psi.h8
-rw-r--r--chromium/media/formats/mp4/aac.cc5
-rw-r--r--chromium/media/formats/mp4/avc.cc1
-rw-r--r--chromium/media/formats/mp4/box_definitions.cc32
-rw-r--r--chromium/media/formats/mp4/box_definitions.h7
-rw-r--r--chromium/media/formats/mp4/box_reader.cc10
-rw-r--r--chromium/media/formats/mp4/box_reader.h3
-rw-r--r--chromium/media/formats/mp4/box_reader_unittest.cc14
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.cc79
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.h28
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser_unittest.cc12
-rw-r--r--chromium/media/formats/mp4/rcheck.h26
-rw-r--r--chromium/media/formats/mp4/track_run_iterator.cc7
-rw-r--r--chromium/media/formats/mpeg/adts_stream_parser.cc11
-rw-r--r--chromium/media/formats/mpeg/adts_stream_parser_unittest.cc4
-rw-r--r--chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc35
-rw-r--r--chromium/media/formats/mpeg/mpeg1_audio_stream_parser_unittest.cc4
-rw-r--r--chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc29
-rw-r--r--chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h2
-rw-r--r--chromium/media/formats/webm/opus_packet_builder.cc89
-rw-r--r--chromium/media/formats/webm/opus_packet_builder.h43
-rw-r--r--chromium/media/formats/webm/webm_audio_client.cc15
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser.cc295
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser.h91
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser_unittest.cc357
-rw-r--r--chromium/media/formats/webm/webm_content_encodings_client.cc41
-rw-r--r--chromium/media/formats/webm/webm_crypto_helpers.h2
-rw-r--r--chromium/media/formats/webm/webm_stream_parser.cc43
-rw-r--r--chromium/media/formats/webm/webm_stream_parser.h8
-rw-r--r--chromium/media/formats/webm/webm_tracks_parser.cc46
-rw-r--r--chromium/media/formats/webm/webm_video_client.cc10
-rw-r--r--chromium/media/media.gyp748
-rw-r--r--chromium/media/media_cdm.gypi16
-rw-r--r--chromium/media/media_cdm_adapter.gyp2
-rw-r--r--chromium/media/media_nacl.gyp6
-rw-r--r--chromium/media/media_options.gni5
-rw-r--r--chromium/media/media_unittests.isolate57
-rw-r--r--chromium/media/midi/BUILD.gn173
-rw-r--r--chromium/media/midi/midi.gyp237
-rw-r--r--chromium/media/midi/midi_export.h29
-rw-r--r--chromium/media/midi/midi_manager.cc33
-rw-r--r--chromium/media/midi/midi_manager.h22
-rw-r--r--chromium/media/midi/midi_manager_alsa.cc1519
-rw-r--r--chromium/media/midi/midi_manager_alsa.h390
-rw-r--r--chromium/media/midi/midi_manager_alsa_unittest.cc700
-rw-r--r--chromium/media/midi/midi_manager_android.cc2
-rw-r--r--chromium/media/midi/midi_manager_mac.cc215
-rw-r--r--chromium/media/midi/midi_manager_mac.h30
-rw-r--r--chromium/media/midi/midi_manager_mac_unittest.cc162
-rw-r--r--chromium/media/midi/midi_manager_unittest.cc12
-rw-r--r--chromium/media/midi/midi_manager_usb.cc124
-rw-r--r--chromium/media/midi/midi_manager_usb.h19
-rw-r--r--chromium/media/midi/midi_manager_usb_unittest.cc268
-rw-r--r--chromium/media/midi/midi_manager_win.cc1291
-rw-r--r--chromium/media/midi/midi_manager_win.h61
-rw-r--r--chromium/media/midi/midi_message_queue.cc9
-rw-r--r--chromium/media/midi/midi_message_queue.h6
-rw-r--r--chromium/media/midi/midi_message_queue_unittest.cc26
-rw-r--r--chromium/media/midi/midi_message_util.cc2
-rw-r--r--chromium/media/midi/midi_message_util.h6
-rw-r--r--chromium/media/midi/midi_message_util_unittest.cc2
-rw-r--r--chromium/media/midi/midi_port_info.cc11
-rw-r--r--chromium/media/midi/midi_port_info.h19
-rw-r--r--chromium/media/midi/midi_result.h2
-rw-r--r--chromium/media/midi/midi_scheduler.cc57
-rw-r--r--chromium/media/midi/midi_scheduler.h47
-rw-r--r--chromium/media/midi/midi_unittests.isolate63
-rw-r--r--chromium/media/midi/usb_midi_descriptor_parser.cc55
-rw-r--r--chromium/media/midi/usb_midi_descriptor_parser.h29
-rw-r--r--chromium/media/midi/usb_midi_descriptor_parser_unittest.cc40
-rw-r--r--chromium/media/midi/usb_midi_device.h28
-rw-r--r--chromium/media/midi/usb_midi_device_android.cc107
-rw-r--r--chromium/media/midi/usb_midi_device_android.h27
-rw-r--r--chromium/media/midi/usb_midi_device_factory_android.cc31
-rw-r--r--chromium/media/midi/usb_midi_device_factory_android.h15
-rw-r--r--chromium/media/midi/usb_midi_export.h33
-rw-r--r--chromium/media/midi/usb_midi_input_stream.cc41
-rw-r--r--chromium/media/midi/usb_midi_input_stream.h16
-rw-r--r--chromium/media/midi/usb_midi_input_stream_unittest.cc45
-rw-r--r--chromium/media/midi/usb_midi_jack.h6
-rw-r--r--chromium/media/midi/usb_midi_output_stream.cc4
-rw-r--r--chromium/media/midi/usb_midi_output_stream.h6
-rw-r--r--chromium/media/midi/usb_midi_output_stream_unittest.cc7
-rw-r--r--chromium/media/mojo/BUILD.gn4
-rw-r--r--chromium/media/mojo/DEPS3
-rw-r--r--chromium/media/mojo/interfaces/BUILD.gn15
-rw-r--r--chromium/media/mojo/interfaces/content_decryption_module.mojom144
-rw-r--r--chromium/media/mojo/interfaces/decryptor.mojom73
-rw-r--r--chromium/media/mojo/interfaces/demuxer_stream.mojom54
-rw-r--r--chromium/media/mojo/interfaces/media_renderer.mojom7
-rw-r--r--chromium/media/mojo/interfaces/media_types.mojom57
-rw-r--r--chromium/media/mojo/interfaces/mojo_bindings.gyp30
-rw-r--r--chromium/media/mojo/interfaces/platform_verification.mojom29
-rwxr-xr-xchromium/media/mojo/scripts/run_mojo_media_renderer.py61
-rw-r--r--chromium/media/mojo/services/BUILD.gn185
-rw-r--r--chromium/media/mojo/services/demuxer_stream_provider_shim.cc6
-rw-r--r--chromium/media/mojo/services/demuxer_stream_provider_shim.h1
-rw-r--r--chromium/media/mojo/services/media_type_converters.cc214
-rw-r--r--chromium/media/mojo/services/media_type_converters.h37
-rw-r--r--chromium/media/mojo/services/media_type_converters_unittest.cc81
-rw-r--r--chromium/media/mojo/services/mojo_cdm.cc164
-rw-r--r--chromium/media/mojo/services/mojo_cdm.h106
-rw-r--r--chromium/media/mojo/services/mojo_cdm_promise.cc64
-rw-r--r--chromium/media/mojo/services/mojo_cdm_promise.h41
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service.cc149
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service.h82
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc121
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_adapter.h36
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_impl.cc116
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_impl.h21
-rw-r--r--chromium/media/mojo/services/mojo_media_application.cc47
-rw-r--r--chromium/media/mojo/services/mojo_renderer_factory.cc31
-rw-r--r--chromium/media/mojo/services/mojo_renderer_factory.h47
-rw-r--r--chromium/media/mojo/services/mojo_renderer_impl.cc63
-rw-r--r--chromium/media/mojo/services/mojo_renderer_impl.h30
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.cc126
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.h34
-rw-r--r--chromium/media/mojo/services/mojo_type_trait.h26
-rw-r--r--chromium/media/mojo/services/renderer_config.cc4
-rw-r--r--chromium/media/mojo/services/renderer_config.h6
-rw-r--r--chromium/media/mojo/services/renderer_config_default.cc26
-rw-r--r--chromium/media/mojo/services/renderer_unittest.cc184
-rw-r--r--chromium/media/ozone/OWNERS2
-rw-r--r--chromium/media/ozone/media_ozone_platform.cc12
-rw-r--r--chromium/media/renderers/audio_renderer_impl.cc (renamed from chromium/media/filters/audio_renderer_impl.cc)137
-rw-r--r--chromium/media/renderers/audio_renderer_impl.h (renamed from chromium/media/filters/audio_renderer_impl.h)38
-rw-r--r--chromium/media/renderers/audio_renderer_impl_unittest.cc (renamed from chromium/media/filters/audio_renderer_impl_unittest.cc)91
-rw-r--r--chromium/media/renderers/default_renderer_factory.cc88
-rw-r--r--chromium/media/renderers/default_renderer_factory.h47
-rw-r--r--chromium/media/renderers/gpu_video_accelerator_factories.h (renamed from chromium/media/filters/gpu_video_accelerator_factories.h)37
-rw-r--r--chromium/media/renderers/mock_gpu_video_accelerator_factories.cc74
-rw-r--r--chromium/media/renderers/mock_gpu_video_accelerator_factories.h (renamed from chromium/media/filters/mock_gpu_video_accelerator_factories.h)36
-rw-r--r--chromium/media/renderers/renderer_impl.cc (renamed from chromium/media/filters/renderer_impl.cc)279
-rw-r--r--chromium/media/renderers/renderer_impl.h (renamed from chromium/media/filters/renderer_impl.h)75
-rw-r--r--chromium/media/renderers/renderer_impl_unittest.cc (renamed from chromium/media/filters/renderer_impl_unittest.cc)225
-rw-r--r--chromium/media/renderers/video_renderer_impl.cc756
-rw-r--r--chromium/media/renderers/video_renderer_impl.h (renamed from chromium/media/filters/video_renderer_impl.h)115
-rw-r--r--chromium/media/renderers/video_renderer_impl_unittest.cc (renamed from chromium/media/filters/video_renderer_impl_unittest.cc)344
-rw-r--r--chromium/media/tools/player_x11/data_source_logger.cc59
-rw-r--r--chromium/media/tools/player_x11/data_source_logger.h41
-rw-r--r--chromium/media/tools/player_x11/gl_video_renderer.cc251
-rw-r--r--chromium/media/tools/player_x11/gl_video_renderer.h43
-rw-r--r--chromium/media/tools/player_x11/player_x11.cc318
-rw-r--r--chromium/media/tools/player_x11/x11_video_renderer.cc215
-rw-r--r--chromium/media/tools/player_x11/x11_video_renderer.h47
-rw-r--r--chromium/media/video/capture/OWNERS2
-rw-r--r--chromium/media/video/capture/android/OWNERS1
-rw-r--r--chromium/media/video/capture/android/imageformat_list.h18
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.cc31
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.h12
-rw-r--r--chromium/media/video/capture/android/video_capture_device_factory_android.cc29
-rw-r--r--chromium/media/video/capture/android/video_capture_device_factory_android.h8
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.cc277
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.h50
-rw-r--r--chromium/media/video/capture/fake_video_capture_device_factory.cc41
-rw-r--r--chromium/media/video/capture/fake_video_capture_device_unittest.cc203
-rw-r--r--chromium/media/video/capture/file_video_capture_device.cc26
-rw-r--r--chromium/media/video/capture/file_video_capture_device.h2
-rw-r--r--chromium/media/video/capture/file_video_capture_device_factory.cc11
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate.cc420
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate.h150
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc99
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h59
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc60
-rw-r--r--chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h54
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_chromeos.cc17
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_chromeos.h2
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_factory_linux.cc230
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_factory_linux.h2
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.cc447
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.h55
-rw-r--r--chromium/media/video/capture/mac/OWNERS1
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h2
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm130
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_factory_mac.mm41
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm13
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.h2
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.mm23
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm4
-rw-r--r--chromium/media/video/capture/video_capture_device.cc63
-rw-r--r--chromium/media/video/capture/video_capture_device.h197
-rw-r--r--chromium/media/video/capture/video_capture_device_factory.cc41
-rw-r--r--chromium/media/video/capture/video_capture_device_factory.h10
-rw-r--r--chromium/media/video/capture/video_capture_device_info.h2
-rw-r--r--chromium/media/video/capture/video_capture_device_unittest.cc220
-rw-r--r--chromium/media/video/capture/win/capability_list_win.cc13
-rw-r--r--chromium/media/video/capture/win/capability_list_win.h26
-rw-r--r--chromium/media/video/capture/win/filter_base_win.cc30
-rw-r--r--chromium/media/video/capture/win/filter_base_win.h35
-rw-r--r--chromium/media/video/capture/win/pin_base_win.cc36
-rw-r--r--chromium/media/video/capture/win/pin_base_win.h55
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.cc16
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.h15
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.cc180
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.h19
-rw-r--r--chromium/media/video/capture/win/video_capture_device_factory_win.cc72
-rw-r--r--chromium/media/video/capture/win/video_capture_device_factory_win.h8
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.cc35
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.h9
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.cc321
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.h27
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.cc140
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.h84
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.cc352
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.h50
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc180
-rw-r--r--chromium/media/video/h264_poc.cc225
-rw-r--r--chromium/media/video/h264_poc.h40
-rw-r--r--chromium/media/video/h264_poc_unittest.cc245
-rw-r--r--chromium/media/video/jpeg_decode_accelerator.h134
-rw-r--r--chromium/media/video/picture.cc17
-rw-r--r--chromium/media/video/picture.h15
-rw-r--r--chromium/media/video/video_decode_accelerator.cc12
-rw-r--r--chromium/media/video/video_decode_accelerator.h19
-rw-r--r--chromium/media/video/video_encode_accelerator.cc9
-rw-r--r--chromium/media/video/video_encode_accelerator.h7
826 files changed, 45958 insertions, 19032 deletions
diff --git a/chromium/media/BUILD.gn b/chromium/media/BUILD.gn
index 2353ce4ef0d..de0e04fbc5d 100644
--- a/chromium/media/BUILD.gn
+++ b/chromium/media/BUILD.gn
@@ -8,12 +8,19 @@ import("//build/config/features.gni")
import("//build/config/linux/pkg_config.gni")
import("//build/config/ui.gni")
import("//media/media_options.gni")
+import("//testing/test.gni")
# Common configuration for targets in the media directory.
# NOT for exporting.
config("media_config") {
- defines = [ "MEDIA_IMPLEMENTATION" ]
- if (cpu_arch == "arm" && arm_use_neon) {
+ defines = []
+ if (!media_use_libvpx) {
+ defines += [ "MEDIA_DISABLE_LIBVPX" ]
+ }
+ if (!media_use_ffmpeg) {
+ defines += [ "MEDIA_DISABLE_FFMPEG" ]
+ }
+ if (current_cpu == "arm" && arm_use_neon) {
defines += [ "USE_NEON" ]
}
if (use_pulseaudio) {
@@ -27,11 +34,18 @@ config("media_config") {
}
}
+config("media_implementation") {
+ defines = [ "MEDIA_IMPLEMENTATION" ]
+}
+
config("media_dependent_config") {
defines = []
if (!media_use_libvpx) {
defines += [ "MEDIA_DISABLE_LIBVPX" ]
}
+ if (!media_use_ffmpeg) {
+ defines += [ "MEDIA_DISABLE_FFMPEG" ]
+ }
if (is_win) {
ldflags = [
"/DELAYLOAD:mf.dll",
@@ -49,39 +63,51 @@ if (use_ozone) {
script = "../ui/ozone/generate_constructor_list.py"
platform_list_txt_file = "$target_gen_dir/../ui/ozone/platform_list.txt"
- constructor_list_cc_file = "$target_gen_dir/../media/ozone/constructor_list.cc"
+ constructor_list_cc_file =
+ "$target_gen_dir/../media/ozone/constructor_list.cc"
- sources = [ platform_list_txt_file ]
- outputs = [ constructor_list_cc_file ]
+ sources = [
+ platform_list_txt_file,
+ ]
+ outputs = [
+ constructor_list_cc_file,
+ ]
args = [
"--platform_list=" + rebase_path(platform_list_txt_file, root_build_dir),
"--output_cc=" + rebase_path(constructor_list_cc_file, root_build_dir),
"--namespace=media",
"--typename=MediaOzonePlatform",
- "--include=\"media/ozone/media_ozone_platform.h\""
+ "--include=\"media/ozone/media_ozone_platform.h\"",
]
}
}
component("media") {
sources = [
+ "blink/skcanvas_video_renderer.cc",
+ "blink/skcanvas_video_renderer.h",
"cdm/aes_decryptor.cc",
"cdm/aes_decryptor.h",
+ "cdm/cenc_utils.cc",
+ "cdm/cenc_utils.h",
+ "cdm/default_cdm_factory.cc",
+ "cdm/default_cdm_factory.h",
"cdm/json_web_key.cc",
"cdm/json_web_key.h",
"cdm/key_system_names.cc",
"cdm/key_system_names.h",
"cdm/player_tracker_impl.cc",
"cdm/player_tracker_impl.h",
+ "cdm/proxy_decryptor.cc",
+ "cdm/proxy_decryptor.h",
"ffmpeg/ffmpeg_deleters.h",
"filters/audio_clock.cc",
"filters/audio_clock.h",
"filters/audio_renderer_algorithm.cc",
"filters/audio_renderer_algorithm.h",
- "filters/audio_renderer_impl.cc",
- "filters/audio_renderer_impl.h",
"filters/chunk_demuxer.cc",
"filters/chunk_demuxer.h",
+ "filters/context_3d.h",
"filters/decoder_selector.cc",
"filters/decoder_selector.h",
"filters/decoder_stream.cc",
@@ -94,22 +120,20 @@ component("media") {
"filters/decrypting_demuxer_stream.h",
"filters/decrypting_video_decoder.cc",
"filters/decrypting_video_decoder.h",
+ "filters/default_media_permission.cc",
+ "filters/default_media_permission.h",
"filters/file_data_source.cc",
"filters/file_data_source.h",
"filters/frame_processor.cc",
"filters/frame_processor.h",
- "filters/gpu_video_accelerator_factories.cc",
- "filters/gpu_video_accelerator_factories.h",
"filters/gpu_video_decoder.cc",
"filters/gpu_video_decoder.h",
"filters/h264_bit_reader.cc",
"filters/h264_bit_reader.h",
"filters/h264_parser.cc",
"filters/h264_parser.h",
- "filters/renderer_impl.cc",
- "filters/renderer_impl.h",
- "filters/skcanvas_video_renderer.cc",
- "filters/skcanvas_video_renderer.h",
+ "filters/jpeg_parser.cc",
+ "filters/jpeg_parser.h",
"filters/source_buffer_platform.cc",
"filters/source_buffer_platform.h",
"filters/source_buffer_range.cc",
@@ -118,52 +142,70 @@ component("media") {
"filters/source_buffer_stream.h",
"filters/stream_parser_factory.cc",
"filters/stream_parser_factory.h",
- "filters/video_frame_scheduler.h",
- "filters/video_frame_scheduler_impl.cc",
- "filters/video_frame_scheduler_impl.h",
- "filters/video_frame_scheduler_proxy.cc",
- "filters/video_frame_scheduler_proxy.h",
- "filters/video_renderer_impl.cc",
- "filters/video_renderer_impl.h",
+ "filters/video_cadence_estimator.cc",
+ "filters/video_cadence_estimator.h",
+ "filters/video_renderer_algorithm.cc",
+ "filters/video_renderer_algorithm.h",
+ "filters/vp8_bool_decoder.cc",
+ "filters/vp8_bool_decoder.h",
+ "filters/vp8_parser.cc",
+ "filters/vp8_parser.h",
"filters/webvtt_util.h",
"filters/wsola_internals.cc",
"filters/wsola_internals.h",
- "midi/midi_manager.cc",
- "midi/midi_manager.h",
- "midi/midi_manager_mac.cc",
- "midi/midi_manager_mac.h",
- "midi/midi_manager_usb.cc",
- "midi/midi_manager_usb.h",
- "midi/midi_manager_win.cc",
- "midi/midi_manager_win.h",
- "midi/midi_message_queue.cc",
- "midi/midi_message_queue.h",
- "midi/midi_message_util.cc",
- "midi/midi_message_util.h",
- "midi/midi_port_info.cc",
- "midi/midi_port_info.h",
- "midi/usb_midi_descriptor_parser.cc",
- "midi/usb_midi_descriptor_parser.h",
- "midi/usb_midi_device.h",
- "midi/usb_midi_input_stream.cc",
- "midi/usb_midi_input_stream.h",
- "midi/usb_midi_jack.h",
- "midi/usb_midi_output_stream.cc",
- "midi/usb_midi_output_stream.h",
+ "formats/common/offset_byte_queue.cc",
+ "formats/common/offset_byte_queue.h",
+ "formats/webm/webm_audio_client.cc",
+ "formats/webm/webm_audio_client.h",
+ "formats/webm/webm_cluster_parser.cc",
+ "formats/webm/webm_cluster_parser.h",
+ "formats/webm/webm_constants.cc",
+ "formats/webm/webm_constants.h",
+ "formats/webm/webm_content_encodings.cc",
+ "formats/webm/webm_content_encodings.h",
+ "formats/webm/webm_content_encodings_client.cc",
+ "formats/webm/webm_content_encodings_client.h",
+ "formats/webm/webm_crypto_helpers.cc",
+ "formats/webm/webm_crypto_helpers.h",
+ "formats/webm/webm_info_parser.cc",
+ "formats/webm/webm_info_parser.h",
+ "formats/webm/webm_parser.cc",
+ "formats/webm/webm_parser.h",
+ "formats/webm/webm_stream_parser.cc",
+ "formats/webm/webm_stream_parser.h",
+ "formats/webm/webm_tracks_parser.cc",
+ "formats/webm/webm_tracks_parser.h",
+ "formats/webm/webm_video_client.cc",
+ "formats/webm/webm_video_client.h",
+ "formats/webm/webm_webvtt_parser.cc",
+ "formats/webm/webm_webvtt_parser.h",
+ "renderers/audio_renderer_impl.cc",
+ "renderers/audio_renderer_impl.h",
+ "renderers/gpu_video_accelerator_factories.h",
+ "renderers/renderer_impl.cc",
+ "renderers/renderer_impl.h",
+ "renderers/video_renderer_impl.cc",
+ "renderers/video_renderer_impl.h",
"video/capture/fake_video_capture_device.cc",
"video/capture/fake_video_capture_device.h",
- "video/capture/fake_video_capture_device_factory.h",
"video/capture/fake_video_capture_device_factory.cc",
+ "video/capture/fake_video_capture_device_factory.h",
"video/capture/file_video_capture_device.cc",
"video/capture/file_video_capture_device.h",
- "video/capture/file_video_capture_device_factory.h",
"video/capture/file_video_capture_device_factory.cc",
+ "video/capture/file_video_capture_device_factory.h",
+ "video/capture/linux/v4l2_capture_delegate.cc",
+ "video/capture/linux/v4l2_capture_delegate.h",
+ "video/capture/linux/v4l2_capture_delegate_multi_plane.cc",
+ "video/capture/linux/v4l2_capture_delegate_multi_plane.h",
+ "video/capture/linux/v4l2_capture_delegate_single_plane.cc",
+ "video/capture/linux/v4l2_capture_delegate_single_plane.h",
+ "video/capture/linux/video_capture_device_chromeos.cc",
+ "video/capture/linux/video_capture_device_chromeos.h",
"video/capture/linux/video_capture_device_factory_linux.cc",
"video/capture/linux/video_capture_device_factory_linux.h",
"video/capture/linux/video_capture_device_linux.cc",
"video/capture/linux/video_capture_device_linux.h",
- "video/capture/linux/video_capture_device_chromeos.cc",
- "video/capture/linux/video_capture_device_chromeos.h",
"video/capture/mac/platform_video_capturing_mac.h",
"video/capture/mac/video_capture_device_avfoundation_mac.h",
"video/capture/mac/video_capture_device_avfoundation_mac.mm",
@@ -181,8 +223,6 @@ component("media") {
"video/capture/video_capture_device_factory.h",
"video/capture/video_capture_device_info.cc",
"video/capture/video_capture_device_info.h",
- "video/capture/video_capture_types.cc",
- "video/capture/video_capture_types.h",
"video/capture/win/capability_list_win.cc",
"video/capture/win/capability_list_win.h",
"video/capture/win/filter_base_win.cc",
@@ -200,43 +240,31 @@ component("media") {
"video/capture/win/video_capture_device_mf_win.h",
"video/capture/win/video_capture_device_win.cc",
"video/capture/win/video_capture_device_win.h",
+ "video/fake_video_encode_accelerator.cc",
+ "video/fake_video_encode_accelerator.h",
+ "video/gpu_memory_buffer_video_frame_pool.cc",
+ "video/gpu_memory_buffer_video_frame_pool.h",
+ "video/h264_poc.cc",
+ "video/h264_poc.h",
"video/picture.cc",
"video/picture.h",
"video/video_decode_accelerator.cc",
"video/video_decode_accelerator.h",
"video/video_encode_accelerator.cc",
"video/video_encode_accelerator.h",
- "formats/common/offset_byte_queue.cc",
- "formats/common/offset_byte_queue.h",
- "formats/webm/webm_audio_client.cc",
- "formats/webm/webm_audio_client.h",
- "formats/webm/webm_cluster_parser.cc",
- "formats/webm/webm_cluster_parser.h",
- "formats/webm/webm_constants.cc",
- "formats/webm/webm_constants.h",
- "formats/webm/webm_content_encodings.cc",
- "formats/webm/webm_content_encodings.h",
- "formats/webm/webm_content_encodings_client.cc",
- "formats/webm/webm_content_encodings_client.h",
- "formats/webm/webm_crypto_helpers.cc",
- "formats/webm/webm_crypto_helpers.h",
- "formats/webm/webm_info_parser.cc",
- "formats/webm/webm_info_parser.h",
- "formats/webm/webm_parser.cc",
- "formats/webm/webm_parser.h",
- "formats/webm/webm_stream_parser.cc",
- "formats/webm/webm_stream_parser.h",
- "formats/webm/webm_tracks_parser.cc",
- "formats/webm/webm_tracks_parser.h",
- "formats/webm/webm_video_client.cc",
- "formats/webm/webm_video_client.h",
- "formats/webm/webm_webvtt_parser.cc",
- "formats/webm/webm_webvtt_parser.h",
]
- configs += [ ":media_config", ]
+ configs += [
+ ":media_config",
+ ":media_implementation",
+
+ # TODO(wolenetz): Fix size_t to int truncation in win64.
+ # See http://crbug.com/171009
+ "//build/config/compiler:no_size_t_to_int_warning",
+ ]
all_dependent_configs = [ ":media_dependent_config" ]
+ allow_circular_includes_from = []
cflags = []
libs = []
defines = []
@@ -274,30 +302,20 @@ component("media") {
}
}
- if (cpu_arch == "arm" && arm_use_neon) {
+ if (current_cpu == "arm" && arm_use_neon) {
defines += [ "USE_NEON" ]
}
if (media_use_libvpx) {
- sources += [
- "filters/vpx_video_decoder.cc",
- "filters/vpx_video_decoder.h",
- ]
- deps += [ "//third_party/libvpx" ]
- }
-
- if (!is_android) {
sources += [
- "filters/opus_audio_decoder.cc",
- "filters/opus_audio_decoder.h",
+ "filters/vpx_video_decoder.cc",
+ "filters/vpx_video_decoder.h",
]
- } else {
+ deps += [ "//third_party/libvpx" ]
+ }
+
+ if (is_android) {
sources += [
- "midi/midi_manager_android.cc",
- "midi/usb_midi_device_android.cc",
- "midi/usb_midi_device_android.h",
- "midi/usb_midi_device_factory_android.cc",
- "midi/usb_midi_device_factory_android.h",
"video/capture/android/video_capture_device_android.cc",
"video/capture/android/video_capture_device_android.h",
"video/capture/android/video_capture_device_factory_android.cc",
@@ -308,24 +326,20 @@ component("media") {
"//media/base/android:media_jni_headers",
"//media/base/android:video_capture_jni_headers",
]
- if (!is_android_webview_build) {
- deps += [ "//media/base/android:media_java" ]
- }
- }
-
- if (cpu_arch != "arm" && is_chromeos && use_x11) {
+ allow_circular_includes_from = [ "//media/base/android" ]
+ } else {
sources += [
- "filters/h264_bitstream_buffer.cc",
- "filters/h264_bitstream_buffer.h",
+ "filters/opus_audio_decoder.cc",
+ "filters/opus_audio_decoder.h",
+ "renderers/default_renderer_factory.cc",
+ "renderers/default_renderer_factory.h",
]
}
- if (use_alsa) {
- libs += [ "asound" ]
- defines += [ "USE_ALSA" ]
+ if (current_cpu != "arm" && is_chromeos) {
sources += [
- "midi/midi_manager_alsa.cc",
- "midi/midi_manager_alsa.h",
+ "filters/h264_bitstream_buffer.cc",
+ "filters/h264_bitstream_buffer.h",
]
}
@@ -336,9 +350,9 @@ component("media") {
include_dirs += [ target_gen_dir ]
sources += [
- "ozone/media_ozone_platform.cc",
- "ozone/media_ozone_platform.h",
- ] + get_target_outputs(":generate_ozone_constructor_list")
+ "ozone/media_ozone_platform.cc",
+ "ozone/media_ozone_platform.h",
+ ] + get_target_outputs(":generate_ozone_constructor_list")
deps += [
":generate_ozone_constructor_list",
@@ -352,19 +366,23 @@ component("media") {
"//third_party/decklink",
]
libs += [
- "CoreMIDI.framework",
"CoreVideo.framework",
"OpenGL.framework",
"QTKit.framework",
]
}
- if (is_ios) {
- deps += [
- "//media/base/mac",
+ if (is_openbsd) {
+ sources -= [
+ "video/capture/linux/v4l2_capture_delegate_multi_plane.cc",
+ "video/capture/linux/v4l2_capture_delegate_multi_plane.h",
]
}
+ if (is_ios) {
+ deps += [ "//media/base/mac" ]
+ }
+
if (is_win) {
libs += [
"mf.lib",
@@ -372,10 +390,6 @@ component("media") {
"mfreadwrite.lib",
"mfuuid.lib",
]
- cflags += [
- "/wd4267" # TODO(wolenetz): Fix size_t to int trunctaion in win64. See
- # http://crbug.com/171009
- ]
}
if (proprietary_codecs) {
@@ -430,10 +444,10 @@ component("media") {
"formats/mpeg/adts_constants.h",
"formats/mpeg/adts_stream_parser.cc",
"formats/mpeg/adts_stream_parser.h",
- "formats/mpeg/mpeg_audio_stream_parser_base.cc",
- "formats/mpeg/mpeg_audio_stream_parser_base.h",
"formats/mpeg/mpeg1_audio_stream_parser.cc",
"formats/mpeg/mpeg1_audio_stream_parser.h",
+ "formats/mpeg/mpeg_audio_stream_parser_base.cc",
+ "formats/mpeg/mpeg_audio_stream_parser_base.h",
]
}
@@ -463,7 +477,10 @@ component("media") {
# Minimal media component for media/cast on iOS.
if (is_ios) {
component("media_for_cast_ios") {
- configs += [ ":media_config" ]
+ configs += [
+ ":media_config",
+ ":media_implementation",
+ ]
all_dependent_configs = [ ":media_dependent_config" ]
include_dirs = [ "." ]
libs = [ "CoreVideo.framework" ]
@@ -480,19 +497,17 @@ if (is_ios) {
test("media_unittests") {
sources = [
+ "blink/skcanvas_video_renderer_unittest.cc",
"cdm/aes_decryptor_unittest.cc",
+ "cdm/cenc_utils_unittest.cc",
"cdm/json_web_key_unittest.cc",
"filters/audio_clock_unittest.cc",
"filters/audio_decoder_selector_unittest.cc",
"filters/audio_renderer_algorithm_unittest.cc",
- "filters/audio_renderer_impl_unittest.cc",
"filters/chunk_demuxer_unittest.cc",
"filters/decrypting_audio_decoder_unittest.cc",
"filters/decrypting_demuxer_stream_unittest.cc",
"filters/decrypting_video_decoder_unittest.cc",
- "filters/fake_demuxer_stream.cc",
- "filters/fake_demuxer_stream.h",
- "filters/fake_demuxer_stream_unittest.cc",
"filters/fake_video_decoder.cc",
"filters/fake_video_decoder.h",
"filters/fake_video_decoder_unittest.cc",
@@ -500,26 +515,19 @@ test("media_unittests") {
"filters/frame_processor_unittest.cc",
"filters/h264_bit_reader_unittest.cc",
"filters/h264_parser_unittest.cc",
- "filters/renderer_impl_unittest.cc",
- "filters/skcanvas_video_renderer_unittest.cc",
+ "filters/jpeg_parser_unittest.cc",
"filters/source_buffer_stream_unittest.cc",
+ "filters/video_cadence_estimator_unittest.cc",
"filters/video_decoder_selector_unittest.cc",
- "filters/video_frame_scheduler_impl_unittest.cc",
- "filters/video_frame_scheduler_unittest.cc",
"filters/video_frame_stream_unittest.cc",
- "filters/video_renderer_impl_unittest.cc",
- "midi/midi_manager_unittest.cc",
- "midi/midi_manager_usb_unittest.cc",
- "midi/midi_message_queue_unittest.cc",
- "midi/midi_message_util_unittest.cc",
- "midi/usb_midi_descriptor_parser_unittest.cc",
- "midi/usb_midi_input_stream_unittest.cc",
- "midi/usb_midi_output_stream_unittest.cc",
- "video/capture/fake_video_capture_device_unittest.cc",
- "video/capture/video_capture_device_unittest.cc",
+ "filters/video_renderer_algorithm_unittest.cc",
+ "filters/vp8_bool_decoder_unittest.cc",
+ "filters/vp8_parser_unittest.cc",
"formats/common/offset_byte_queue_unittest.cc",
"formats/webm/cluster_builder.cc",
"formats/webm/cluster_builder.h",
+ "formats/webm/opus_packet_builder.cc",
+ "formats/webm/opus_packet_builder.h",
"formats/webm/tracks_builder.cc",
"formats/webm/tracks_builder.h",
"formats/webm/webm_cluster_parser_unittest.cc",
@@ -527,8 +535,18 @@ test("media_unittests") {
"formats/webm/webm_parser_unittest.cc",
"formats/webm/webm_tracks_parser_unittest.cc",
"formats/webm/webm_webvtt_parser_unittest.cc",
+ "renderers/audio_renderer_impl_unittest.cc",
+ "renderers/renderer_impl_unittest.cc",
+ "renderers/video_renderer_impl_unittest.cc",
+ "video/capture/fake_video_capture_device_unittest.cc",
+ "video/capture/video_capture_device_unittest.cc",
+ "video/h264_poc_unittest.cc",
]
+ # TODO(wolenetz): Fix size_t to int trunctaion in win64.
+ # See http://crbug.com/171009
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
deps = [
":media",
":test_support",
@@ -538,6 +556,7 @@ test("media_unittests") {
"//media/audio:test_support",
"//media/base:unittests",
"//media/base:test_support",
+ "//media/test:pipeline_integration_tests",
"//skia", # Direct dependency required to inherit config.
"//testing/gmock",
"//testing/gtest",
@@ -550,7 +569,7 @@ test("media_unittests") {
deps += [
"//media/base/android",
# TODO(GYP)
- #"//testing/android:native_test_native_code"
+ #"//testing/android/native_test:native_test_native_code"
]
}
@@ -564,12 +583,10 @@ test("media_unittests") {
"filters/ffmpeg_glue_unittest.cc",
"filters/ffmpeg_video_decoder_unittest.cc",
"filters/in_memory_url_protocol_unittest.cc",
- "filters/pipeline_integration_test.cc",
- "filters/pipeline_integration_test_base.cc",
]
}
- if (cpu_arch != "arm" && is_chromeos && use_x11) {
+ if (current_cpu != "arm" && is_chromeos) {
sources += [ "filters/h264_bitstream_buffer_unittest.cc" ]
}
@@ -600,41 +617,28 @@ test("media_unittests") {
]
}
- if (is_win && cpu_arch == "x64") {
- cflags += [
- "/wd4267" # TODO(wolenetz): Fix size_t to int trunctaion in win64. See
- # http://crbug.com/171009
- ]
+ if (is_mac || is_ios) {
+ deps += [ "//media/base/mac" ]
}
if (is_mac) {
- sources += [
- "video/capture/mac/video_capture_device_factory_mac_unittest.mm"
- ]
+ sources +=
+ [ "video/capture/mac/video_capture_device_factory_mac_unittest.mm" ]
}
-# include_dirs += [
-# # Needed by media_drm_bridge.cc.
-# target_gen_dir,
-# ],
+ # include_dirs += [
+ # # Needed by media_drm_bridge.cc.
+ # target_gen_dir,
+ # ],
configs += [ ":media_config" ]
if (media_use_ffmpeg) {
- deps += [
- "//third_party/ffmpeg", # Direct dependency required to inherit config.
- ]
+ deps += [ "//third_party/ffmpeg" ] # Direct dependency required to inherit config.
}
}
test("media_perftests") {
- sources = []
- if (media_use_ffmpeg) {
- sources += [
- "filters/pipeline_integration_perftest.cc",
- "filters/pipeline_integration_test_base.cc",
- ]
- }
configs += [ ":media_config" ]
deps = [
":media",
@@ -644,6 +648,7 @@ test("media_perftests") {
"//media/audio:test_support",
"//media/base:perftests",
"//media/base:test_support",
+ "//media/test:pipeline_integration_perftests",
"//testing/gmock",
"//testing/gtest",
"//testing/perf",
@@ -654,21 +659,29 @@ test("media_perftests") {
deps += [ "//ui/gl" ]
}
if (media_use_ffmpeg) {
- deps += [
- "//third_party/ffmpeg", # Direct dependency required to inherit config.
- ]
+ deps += [ "//third_party/ffmpeg" ] # Direct dependency required to inherit config.
}
}
+# For running the subset of media_unittests that might require audio hardware
+# separately on GPU bots. media_unittests includes these too.
+test("audio_unittests") {
+ sources = [
+ "base/run_all_unittests.cc",
+ ]
+ deps = [
+ ":test_support",
+ "//base/test:test_support",
+ "//media/audio:unittests",
+ "//ui/gfx:test_support",
+ ]
+}
+
source_set("test_support") {
testonly = true
sources = [
- "filters/clockless_video_frame_scheduler.cc",
- "filters/clockless_video_frame_scheduler.h",
- "filters/mock_gpu_video_accelerator_factories.cc",
- "filters/mock_gpu_video_accelerator_factories.h",
- "filters/test_video_frame_scheduler.cc",
- "filters/test_video_frame_scheduler.h",
+ "renderers/mock_gpu_video_accelerator_factories.cc",
+ "renderers/mock_gpu_video_accelerator_factories.h",
"video/mock_video_decode_accelerator.cc",
"video/mock_video_decode_accelerator.h",
]
@@ -696,84 +709,36 @@ component("shared_memory_support") {
"base/vector_math.cc",
"base/vector_math.h",
]
- configs += [ ":media_config" ]
- defines = [ "MEDIA_IMPLEMENTATION" ]
- deps = [ "//base" ]
+ configs += [
+ ":media_config",
+ ":media_implementation",
+ ]
+ deps = [
+ "//base",
+ ]
}
if (media_use_ffmpeg) {
- test("ffmpeg_unittests") {
- sources = [ "ffmpeg/ffmpeg_unittest.cc" ]
-
- deps = [
- ":media",
- ":test_support",
- "//base",
- "//base:i18n",
- "//base/test:test_support",
- "//testing/gtest",
- "//third_party/ffmpeg",
- "//ui/gfx:test_support",
- ]
- }
-
test("ffmpeg_regression_tests") {
sources = [
"base/run_all_unittests.cc",
"ffmpeg/ffmpeg_regression_tests.cc",
- "filters/pipeline_integration_test_base.cc",
]
- configs += [ ":media_config" ]
+
+ configs += [ "//media:media_config" ]
+
deps = [
- ":media",
- ":test_support",
"//base/test:test_support",
+ "//media",
+ "//media:test_support",
"//media/audio:test_support",
"//media/base:test_support",
+ "//media/test:pipeline_integration_tests",
"//testing/gmock",
"//testing/gtest",
"//third_party/ffmpeg",
"//ui/gfx/geometry",
"//ui/gfx:test_support",
]
- # TODO(ajwong): This was in the original gyp, but it seems silly.
- # ['os_posix==1 and OS!="mac"', {
- # 'conditions': [
- # ['use_allocator!="none"', {
- # 'dependencies': [
- # '../base/allocator/allocator.gyp:allocator',
- # ],
- # }],
- # ],
- # }],
- }
-}
-
-if (use_x11) {
- executable("player_x11") {
- sources = [
- "tools/player_x11/data_source_logger.cc",
- "tools/player_x11/data_source_logger.h",
- "tools/player_x11/gl_video_renderer.cc",
- "tools/player_x11/gl_video_renderer.h",
- "tools/player_x11/player_x11.cc",
- "tools/player_x11/x11_video_renderer.cc",
- "tools/player_x11/x11_video_renderer.h",
- ]
- configs += [
- ":media_config",
- "//build/config/linux:x11",
- "//build/config/linux:xext",
-# TODO(ajwong): Why does xext get a separate thing in //build/config/linux:BUILD.gn
- # "//build/config/linux:xrender",
- ]
- deps = [
- ":media",
- ":shared_memory_support",
- "//base",
- "//ui/gl",
- "//ui/gfx",
- "//ui/gfx/geometry",
- ]
}
}
diff --git a/chromium/media/DEPS b/chromium/media/DEPS
index f9c29befabd..6d3642e55b7 100644
--- a/chromium/media/DEPS
+++ b/chromium/media/DEPS
@@ -1,7 +1,11 @@
# Do NOT add net/ or ui/base without a great reason, they're huge!
include_rules = [
+ "+crypto",
+ "+device/udev_linux",
+ "+device/usb",
"+gpu",
"+jni",
+ "+skia/ext",
"+third_party/ffmpeg",
"+third_party/libvpx",
"+third_party/libyuv",
@@ -11,5 +15,6 @@ include_rules = [
"+ui/gfx",
"+ui/gl",
"+ui/ozone",
+ "+third_party/widevine/cdm/widevine_cdm_common.h",
"-media/blink",
]
diff --git a/chromium/media/OWNERS b/chromium/media/OWNERS
index ff074fbad98..afecf18fe0d 100644
--- a/chromium/media/OWNERS
+++ b/chromium/media/OWNERS
@@ -1,7 +1,18 @@
+# NOTE: Do not use these owners when you're in a subdirectory that has
+# OWNERS file. For example:
+# - cast
+# - midi
+# - ozone
+# - video/capture
+# Instead prefer the OWNERS in the subdirectory as they will be more familiar,
+# and to load balance. Only use OWNERS in this file for these subdirectories
+# when doing refactorings and general cleanups.
+
dalecurtis@chromium.org
ddorwin@chromium.org
+jrummell@chromium.org
+sandersd@chromium.org
scherkus@chromium.org
-vrk@chromium.org
wolenetz@chromium.org
xhwang@chromium.org
diff --git a/chromium/media/PRESUBMIT.py b/chromium/media/PRESUBMIT.py
index 279115ccb2f..feba2f719a5 100644
--- a/chromium/media/PRESUBMIT.py
+++ b/chromium/media/PRESUBMIT.py
@@ -5,7 +5,7 @@
"""Top-level presubmit script for Chromium media component.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into gcl.
+for more details about the presubmit API built into depot_tools.
"""
def _FilterFile(affected_file):
diff --git a/chromium/media/audio/BUILD.gn b/chromium/media/audio/BUILD.gn
index 3ea1666e26a..11d3d76bf9b 100644
--- a/chromium/media/audio/BUILD.gn
+++ b/chromium/media/audio/BUILD.gn
@@ -12,8 +12,12 @@ if (!link_pulseaudio) {
extra_header = "pulse/pulse_stub_header.fragment"
script = "../../tools/generate_stubs/generate_stubs.py"
- sources = [ "pulse/pulse.sigs" ]
- source_prereqs = [ extra_header ]
+ sources = [
+ "pulse/pulse.sigs",
+ ]
+ inputs = [
+ extra_header,
+ ]
stubs_filename_root = "pulse_stubs"
# TODO(ajwong): these need to be included in the pulse build.
@@ -22,12 +26,18 @@ if (!link_pulseaudio) {
"$target_gen_dir/pulse/$stubs_filename_root.h",
]
args = [
- "-i", rebase_path("$target_gen_dir/pulse", root_build_dir),
- "-o", rebase_path("$target_gen_dir/pulse", root_build_dir),
- "-t", "posix_stubs",
- "-e", rebase_path(extra_header, root_build_dir),
- "-s", stubs_filename_root,
- "-p", "media/audio/pulse",
+ "-i",
+ rebase_path("$target_gen_dir/pulse", root_build_dir),
+ "-o",
+ rebase_path("$target_gen_dir/pulse", root_build_dir),
+ "-t",
+ "posix_stubs",
+ "-e",
+ rebase_path(extra_header, root_build_dir),
+ "-s",
+ stubs_filename_root,
+ "-p",
+ "media/audio/pulse",
]
args += rebase_path(sources, root_build_dir)
@@ -72,16 +82,18 @@ source_set("audio") {
"audio_power_monitor.cc",
"audio_power_monitor.h",
"audio_source_diverter.h",
- "fake_audio_consumer.cc",
- "fake_audio_consumer.h",
+ "clockless_audio_sink.cc",
+ "clockless_audio_sink.h",
"fake_audio_input_stream.cc",
"fake_audio_input_stream.h",
- "fake_audio_log_factory.h",
"fake_audio_log_factory.cc",
+ "fake_audio_log_factory.h",
"fake_audio_manager.cc",
"fake_audio_manager.h",
"fake_audio_output_stream.cc",
"fake_audio_output_stream.h",
+ "fake_audio_worker.cc",
+ "fake_audio_worker.h",
"null_audio_sink.cc",
"null_audio_sink.h",
"sample_rates.cc",
@@ -103,7 +115,10 @@ source_set("audio") {
]
deps = []
libs = []
- configs += [ "//media:media_config" ]
+ configs += [
+ "//media:media_config",
+ "//media:media_implementation",
+ ]
if (is_mac) {
sources += [
@@ -160,9 +175,7 @@ source_set("audio") {
"android/opensles_output.h",
"android/opensles_wrapper.cc",
]
- deps += [
- "//media/base/android:media_jni_headers",
- ]
+ deps += [ "//media/base/android:media_jni_headers" ]
}
if (is_openbsd) {
@@ -173,9 +186,7 @@ source_set("audio") {
}
if (is_linux) {
- sources += [
- "linux/audio_manager_linux.cc",
- ]
+ sources += [ "linux/audio_manager_linux.cc" ]
}
if (use_alsa) {
@@ -235,13 +246,15 @@ source_set("audio") {
sources += get_target_outputs(":pulse_generate_stubs")
}
}
+
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
}
source_set("test_support") {
testonly = true
sources = [
- "clockless_audio_sink.cc",
- "clockless_audio_sink.h",
+ "audio_unittest_util.cc",
+ "audio_unittest_util.h",
"mock_audio_manager.cc",
"mock_audio_manager.h",
"mock_audio_source_callback.cc",
@@ -249,7 +262,9 @@ source_set("test_support") {
"test_audio_input_controller_factory.cc",
"test_audio_input_controller_factory.h",
]
- deps = [ "//testing/gmock" ]
+ deps = [
+ "//testing/gmock",
+ ]
configs += [ "//media:media_config" ]
}
@@ -258,33 +273,34 @@ source_set("unittests") {
sources = [
"audio_input_controller_unittest.cc",
"audio_input_unittest.cc",
- "audio_low_latency_input_output_unittest.cc",
+ "audio_manager_factory_unittest.cc",
"audio_manager_unittest.cc",
"audio_output_controller_unittest.cc",
"audio_output_device_unittest.cc",
"audio_output_proxy_unittest.cc",
"audio_parameters_unittest.cc",
"audio_power_monitor_unittest.cc",
- "fake_audio_consumer_unittest.cc",
+ "fake_audio_worker_unittest.cc",
"simple_sources_unittest.cc",
"virtual_audio_input_stream_unittest.cc",
"virtual_audio_output_stream_unittest.cc",
]
deps = [
":test_support",
+ "//media/base:test_support",
"//testing/gmock",
"//testing/gtest",
]
- configs += [ "//media:media_config" ]
+
+ configs += [
+ "//build/config/compiler:no_size_t_to_int_warning",
+ "//media:media_config",
+ ]
if (is_android) {
- sources += [
- "android/audio_android_unittest.cc",
- ]
+ sources += [ "android/audio_android_unittest.cc" ]
} else {
- sources += [
- "audio_input_volume_unittest.cc",
- ]
+ sources += [ "audio_input_volume_unittest.cc" ]
}
if (is_mac) {
@@ -325,6 +341,7 @@ source_set("unittests") {
if (use_alsa) {
sources += [
"alsa/alsa_output_unittest.cc",
+ "audio_low_latency_input_output_unittest.cc",
]
}
}
diff --git a/chromium/media/audio/OWNERS b/chromium/media/audio/OWNERS
index 17c8eccedba..397a75743e4 100644
--- a/chromium/media/audio/OWNERS
+++ b/chromium/media/audio/OWNERS
@@ -1,8 +1,5 @@
tommi@chromium.org
-# Linux/Pulse
-xians@chromium.org
-
# Windows
henrika@chromium.org
diff --git a/chromium/media/audio/agc_audio_stream.h b/chromium/media/audio/agc_audio_stream.h
index f7b909fed4d..a2958ce391a 100644
--- a/chromium/media/audio/agc_audio_stream.h
+++ b/chromium/media/audio/agc_audio_stream.h
@@ -138,14 +138,15 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// be read in each AudioInputCallback::OnData() callback and fed to the
// render-side AGC. User must call StartAgc() as well to start measuring
// the microphone level.
- virtual void SetAutomaticGainControl(bool enabled) override {
+ bool SetAutomaticGainControl(bool enabled) override {
DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
DCHECK(thread_checker_.CalledOnValidThread());
agc_is_enabled_ = enabled;
+ return true;
}
// Gets the current automatic gain control state.
- virtual bool GetAutomaticGainControl() override {
+ bool GetAutomaticGainControl() override {
DCHECK(thread_checker_.CalledOnValidThread());
return agc_is_enabled_;
}
diff --git a/chromium/media/audio/alsa/alsa_input.cc b/chromium/media/audio/alsa/alsa_input.cc
index 3c555a05b5d..67f23593652 100644
--- a/chromium/media/audio/alsa/alsa_input.cc
+++ b/chromium/media/audio/alsa/alsa_input.cc
@@ -8,7 +8,6 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
#include "media/audio/alsa/alsa_output.h"
#include "media/audio/alsa/alsa_util.h"
#include "media/audio/alsa/alsa_wrapper.h"
diff --git a/chromium/media/audio/alsa/alsa_output.cc b/chromium/media/audio/alsa/alsa_output.cc
index 6e62d69c881..554aa5fbeed 100644
--- a/chromium/media/audio/alsa/alsa_output.cc
+++ b/chromium/media/audio/alsa/alsa_output.cc
@@ -37,10 +37,9 @@
#include <algorithm>
#include "base/bind.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/stl_util.h"
-#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/alsa/alsa_util.h"
#include "media/audio/alsa/alsa_wrapper.h"
#include "media/audio/alsa/audio_manager_alsa.h"
@@ -58,6 +57,10 @@ static const int kPcmRecoverIsSilent = 1;
static const int kPcmRecoverIsSilent = 0;
#endif
+// The output channel layout if we set up downmixing for the kDefaultDevice
+// device.
+static const ChannelLayout kDefaultOutputChannelLayout = CHANNEL_LAYOUT_STEREO;
+
// While the "default" device may support multi-channel audio, in Alsa, only
// the device names surround40, surround41, surround50, etc, have a defined
// channel mapping according to Lennart:
@@ -215,25 +218,25 @@ bool AlsaPcmOutputStream::Open() {
stop_stream_ = true;
TransitionTo(kInError);
return false;
+ }
+ bytes_per_output_frame_ =
+ channel_mixer_ ? mixed_audio_bus_->channels() * bytes_per_sample_
+ : bytes_per_frame_;
+ uint32 output_packet_size = frames_per_packet_ * bytes_per_output_frame_;
+ buffer_.reset(new media::SeekableBuffer(0, output_packet_size));
+
+ // Get alsa buffer size.
+ snd_pcm_uframes_t buffer_size;
+ snd_pcm_uframes_t period_size;
+ int error =
+ wrapper_->PcmGetParams(playback_handle_, &buffer_size, &period_size);
+ if (error < 0) {
+ LOG(ERROR) << "Failed to get playback buffer size from ALSA: "
+ << wrapper_->StrError(error);
+ // Buffer size is at least twice of packet size.
+ alsa_buffer_frames_ = frames_per_packet_ * 2;
} else {
- bytes_per_output_frame_ = channel_mixer_ ?
- mixed_audio_bus_->channels() * bytes_per_sample_ : bytes_per_frame_;
- uint32 output_packet_size = frames_per_packet_ * bytes_per_output_frame_;
- buffer_.reset(new media::SeekableBuffer(0, output_packet_size));
-
- // Get alsa buffer size.
- snd_pcm_uframes_t buffer_size;
- snd_pcm_uframes_t period_size;
- int error = wrapper_->PcmGetParams(playback_handle_, &buffer_size,
- &period_size);
- if (error < 0) {
- LOG(ERROR) << "Failed to get playback buffer size from ALSA: "
- << wrapper_->StrError(error);
- // Buffer size is at least twice of packet size.
- alsa_buffer_frames_ = frames_per_packet_ * 2;
- } else {
- alsa_buffer_frames_ = buffer_size;
- }
+ alsa_buffer_frames_ = buffer_size;
}
return true;
@@ -351,7 +354,7 @@ void AlsaPcmOutputStream::BufferPacket(bool* source_exhausted) {
*source_exhausted = false;
// Request more data only when we run out of data in the buffer, because
- // WritePacket() comsumes only the current chunk of data.
+ // WritePacket() consumes only the current chunk of data.
if (!buffer_->forward_bytes()) {
// Before making a request to source for data we need to determine the
// delay (in bytes) for the requested data to be played.
@@ -368,13 +371,34 @@ void AlsaPcmOutputStream::BufferPacket(bool* source_exhausted) {
// TODO(dalecurtis): Channel downmixing, upmixing, should be done in mixer;
// volume adjust should use SSE optimized vector_fmul() prior to interleave.
AudioBus* output_bus = audio_bus_.get();
+ ChannelLayout output_channel_layout = channel_layout_;
if (channel_mixer_) {
output_bus = mixed_audio_bus_.get();
channel_mixer_->Transform(audio_bus_.get(), output_bus);
+ output_channel_layout = kDefaultOutputChannelLayout;
// Adjust packet size for downmix.
packet_size = packet_size / bytes_per_frame_ * bytes_per_output_frame_;
}
+ // Reorder channels for 5.0, 5.1, and 7.1 to match ALSA's channel order,
+ // which has front center at channel index 4 and LFE at channel index 5.
+ // See http://ffmpeg.org/pipermail/ffmpeg-cvslog/2011-June/038454.html.
+ switch (output_channel_layout) {
+ case media::CHANNEL_LAYOUT_5_0:
+ case media::CHANNEL_LAYOUT_5_0_BACK:
+ output_bus->SwapChannels(2, 3);
+ output_bus->SwapChannels(3, 4);
+ break;
+ case media::CHANNEL_LAYOUT_5_1:
+ case media::CHANNEL_LAYOUT_5_1_BACK:
+ case media::CHANNEL_LAYOUT_7_1:
+ output_bus->SwapChannels(2, 4);
+ output_bus->SwapChannels(3, 5);
+ break;
+ default:
+ break;
+ }
+
// Note: If this ever changes to output raw float the data must be clipped
// and sanitized since it may come from an untrusted source such as NaCl.
output_bus->Scale(volume_);
@@ -408,7 +432,6 @@ void AlsaPcmOutputStream::WritePacket() {
const uint8* buffer_data;
int buffer_size;
if (buffer_->GetCurrentChunk(&buffer_data, &buffer_size)) {
- buffer_size = buffer_size - (buffer_size % bytes_per_output_frame_);
snd_pcm_sframes_t frames = std::min(
static_cast<snd_pcm_sframes_t>(buffer_size / bytes_per_output_frame_),
GetAvailableFrames());
@@ -588,7 +611,8 @@ snd_pcm_sframes_t AlsaPcmOutputStream::GetCurrentDelay() {
// driver is PulseAudio based, certain configuration settings (e.g., tsched=1)
// will generate much larger delay values than |alsa_buffer_frames_|, so only
// clip if delay is truly crazy (> 10x expected).
- if (static_cast<snd_pcm_uframes_t>(delay) > alsa_buffer_frames_ * 10) {
+ if (delay < 0 ||
+ static_cast<snd_pcm_uframes_t>(delay) > alsa_buffer_frames_ * 10) {
delay = alsa_buffer_frames_ - GetAvailableFrames();
}
@@ -632,10 +656,12 @@ snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
// 1) Attempt to open a device that best matches the number of channels
// requested.
// 2) If that fails, attempt the "plug:" version of it in case ALSA can
- // remap do some software conversion to make it work.
- // 3) Fallback to kDefaultDevice.
- // 4) If that fails too, try the "plug:" version of kDefaultDevice.
- // 5) Give up.
+ // remap and do some software conversion to make it work.
+ // 3) If that fails, attempt the "plug:" version of the guessed name in
+ // case ALSA can remap and do some software conversion to make it work.
+ // 4) Fallback to kDefaultDevice.
+ // 5) If that fails too, try the "plug:" version of kDefaultDevice.
+ // 6) Give up.
snd_pcm_t* handle = NULL;
device_name_ = FindDeviceForChannels(channels_);
@@ -656,6 +682,17 @@ snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
latency)) != NULL) {
return handle;
}
+
+ // Step 3.
+ device_name_ = GuessSpecificDeviceName(channels_);
+ if (!device_name_.empty()) {
+ device_name_ = kPlugPrefix + device_name_;
+ if ((handle = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), channels_, sample_rate_,
+ pcm_format_, latency)) != NULL) {
+ return handle;
+ }
+ }
}
// For the kDefaultDevice device, we can only reliably depend on 2-channel
@@ -664,14 +701,14 @@ snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
// downmixing.
uint32 default_channels = channels_;
if (default_channels > 2) {
- channel_mixer_.reset(new ChannelMixer(
- channel_layout_, CHANNEL_LAYOUT_STEREO));
+ channel_mixer_.reset(
+ new ChannelMixer(channel_layout_, kDefaultOutputChannelLayout));
default_channels = 2;
mixed_audio_bus_ = AudioBus::Create(
default_channels, audio_bus_->frames());
}
- // Step 3.
+ // Step 4.
device_name_ = kDefaultDevice;
if ((handle = alsa_util::OpenPlaybackDevice(
wrapper_, device_name_.c_str(), default_channels, sample_rate_,
@@ -679,7 +716,7 @@ snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
return handle;
}
- // Step 4.
+ // Step 5.
device_name_ = kPlugPrefix + device_name_;
if ((handle = alsa_util::OpenPlaybackDevice(
wrapper_, device_name_.c_str(), default_channels, sample_rate_,
diff --git a/chromium/media/audio/alsa/alsa_output_unittest.cc b/chromium/media/audio/alsa/alsa_output_unittest.cc
index a68be4bbd0c..5a7a3aff87f 100644
--- a/chromium/media/audio/alsa/alsa_output_unittest.cc
+++ b/chromium/media/audio/alsa/alsa_output_unittest.cc
@@ -85,14 +85,14 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
// of active output streams. It is because the number of active streams
// is managed inside MakeAudioOutputStream, and we don't use
// MakeAudioOutputStream to create the stream in the tests.
- virtual void ReleaseOutputStream(AudioOutputStream* stream) override {
+ void ReleaseOutputStream(AudioOutputStream* stream) override {
DCHECK(stream);
delete stream;
}
// We don't mock this method since all tests will do the same thing
// and use the current task runner.
- virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() override {
+ scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() override {
return base::MessageLoop::current()->message_loop_proxy();
}
@@ -163,6 +163,7 @@ class AlsaPcmOutputStreamTest : public testing::Test {
static char kSurround70[];
static char kSurround71[];
static void* kFakeHints[];
+ static char kGenericSurround50[];
StrictMock<MockAlsaWrapper> mock_alsa_wrapper_;
scoped_ptr<StrictMock<MockAudioManagerAlsa> > mock_manager_;
@@ -202,6 +203,7 @@ char AlsaPcmOutputStreamTest::kSurround71[] = "surround71:CARD=foo,DEV=0";
void* AlsaPcmOutputStreamTest::kFakeHints[] = {
kSurround40, kSurround41, kSurround50, kSurround51,
kSurround70, kSurround71, NULL };
+char AlsaPcmOutputStreamTest::kGenericSurround50[] = "surround50";
// Custom action to clear a memory buffer.
ACTION(ClearBuffer) {
@@ -746,18 +748,21 @@ TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_FallbackDevices) {
// operations should be as follows. Assume the multi-channel device name is
// surround50:
//
- // 1) Try open "surround50"
- // 2) Try open "plug:surround50".
- // 3) Try open "default".
- // 4) Try open "plug:default".
- // 5) Give up trying to open.
+ // 1) Try open "surround50:CARD=foo,DEV=0"
+ // 2) Try open "plug:surround50:CARD=foo,DEV=0".
+ // 3) Try open "plug:surround50".
+ // 4) Try open "default".
+ // 5) Try open "plug:default".
+ // 6) Give up trying to open.
//
const string first_try = kSurround50;
const string second_try = string(AlsaPcmOutputStream::kPlugPrefix) +
kSurround50;
- const string third_try = AlsaPcmOutputStream::kDefaultDevice;
- const string fourth_try = string(AlsaPcmOutputStream::kPlugPrefix) +
- AlsaPcmOutputStream::kDefaultDevice;
+ const string third_try = string(AlsaPcmOutputStream::kPlugPrefix) +
+ kGenericSurround50;
+ const string fourth_try = AlsaPcmOutputStream::kDefaultDevice;
+ const string fifth_try = string(AlsaPcmOutputStream::kPlugPrefix) +
+ AlsaPcmOutputStream::kDefaultDevice;
EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
.WillOnce(DoAll(SetArgumentPointee<2>(&kFakeHints[0]), Return(0)));
@@ -779,6 +784,8 @@ TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_FallbackDevices) {
.WillOnce(Return(kTestFailedErrno));
EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(fourth_try.c_str()), _, _))
.WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(fifth_try.c_str()), _, _))
+ .WillOnce(Return(kTestFailedErrno));
AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
EXPECT_FALSE(test_stream->AutoSelectDevice(5));
diff --git a/chromium/media/audio/alsa/alsa_util.cc b/chromium/media/audio/alsa/alsa_util.cc
index f26cbd30f2f..ffc3a39d3de 100644
--- a/chromium/media/audio/alsa/alsa_util.cc
+++ b/chromium/media/audio/alsa/alsa_util.cc
@@ -4,7 +4,6 @@
#include "media/audio/alsa/alsa_util.h"
-#include <string>
#include "base/logging.h"
#include "media/audio/alsa/alsa_wrapper.h"
diff --git a/chromium/media/audio/alsa/alsa_wrapper.cc b/chromium/media/audio/alsa/alsa_wrapper.cc
index 969f3c499f4..d77e27e4a98 100644
--- a/chromium/media/audio/alsa/alsa_wrapper.cc
+++ b/chromium/media/audio/alsa/alsa_wrapper.cc
@@ -4,7 +4,6 @@
#include "media/audio/alsa/alsa_wrapper.h"
-#include <alsa/asoundlib.h>
namespace media {
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
index 76248348c46..e44c8c85afe 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.cc
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -50,7 +50,7 @@ static const char* kInvalidAudioInputDevices[] = {
// static
void AudioManagerAlsa::ShowLinuxAudioInputSettings() {
scoped_ptr<base::Environment> env(base::Environment::Create());
- CommandLine command_line(CommandLine::NO_PROGRAM);
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
switch (base::nix::GetDesktopEnvironment(env.get())) {
case base::nix::DESKTOP_ENVIRONMENT_GNOME:
command_line.SetProgram(base::FilePath("gnome-volume-control"));
@@ -69,7 +69,7 @@ void AudioManagerAlsa::ShowLinuxAudioInputSettings() {
<< "what command to use for your desktop environment.";
return;
}
- base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+ base::LaunchProcess(command_line, base::LaunchOptions());
}
// Implementation of AudioManager.
@@ -335,9 +335,9 @@ AudioParameters AudioManagerAlsa::GetPreferredOutputStreamParameters(
AudioOutputStream* AudioManagerAlsa::MakeOutputStream(
const AudioParameters& params) {
std::string device_name = AlsaPcmOutputStream::kAutoSelectDevice;
- if (CommandLine::ForCurrentProcess()->HasSwitch(
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kAlsaOutputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ device_name = base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
switches::kAlsaOutputDevice);
}
return new AlsaPcmOutputStream(device_name, params, wrapper_.get(), this);
@@ -347,8 +347,9 @@ AudioInputStream* AudioManagerAlsa::MakeInputStream(
const AudioParameters& params, const std::string& device_id) {
std::string device_name = (device_id == AudioManagerBase::kDefaultDeviceId) ?
AlsaPcmInputStream::kAutoSelectDevice : device_id;
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kAlsaInputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kAlsaInputDevice)) {
+ device_name = base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
switches::kAlsaInputDevice);
}
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index 88a7582a732..b8ebccf3b2b 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -19,6 +19,7 @@
#include "media/audio/android/audio_manager_android.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/mock_audio_source_callback.h"
#include "media/base/decoder_buffer.h"
#include "media/base/seekable_buffer.h"
@@ -91,7 +92,7 @@ static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
// Helper method which verifies that the device list starts with a valid
// default device name followed by non-default device names.
static void CheckDeviceNames(const AudioDeviceNames& device_names) {
- VLOG(2) << "Got " << device_names.size() << " audio devices.";
+ DVLOG(2) << "Got " << device_names.size() << " audio devices.";
if (device_names.empty()) {
// Log a warning so we can see the status on the build bots. No need to
// break the test though since this does successfully test the code and
@@ -113,8 +114,8 @@ static void CheckDeviceNames(const AudioDeviceNames& device_names) {
while (it != device_names.end()) {
EXPECT_FALSE(it->device_name.empty());
EXPECT_FALSE(it->unique_id.empty());
- VLOG(2) << "Device ID(" << it->unique_id
- << "), label: " << it->device_name;
+ DVLOG(2) << "Device ID(" << it->unique_id
+ << "), label: " << it->device_name;
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
it->device_name);
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
@@ -169,17 +170,16 @@ class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
// Log the name of the file which is used as input for this test.
base::FilePath file_path = GetTestDataFilePath(name);
- VLOG(0) << "Reading from file: " << file_path.value().c_str();
+ DVLOG(0) << "Reading from file: " << file_path.value().c_str();
}
- virtual ~FileAudioSource() {}
+ ~FileAudioSource() override {}
// AudioOutputStream::AudioSourceCallback implementation.
// Use samples read from a data file and fill up the audio buffer
// provided to us in the callback.
- virtual int OnMoreData(AudioBus* audio_bus,
- uint32 total_bytes_delay) override {
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override {
bool stop_playing = false;
int max_size =
audio_bus->frames() * audio_bus->channels() * kBytesPerSample;
@@ -207,7 +207,7 @@ class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual void OnError(AudioOutputStream* stream) override {}
+ void OnError(AudioOutputStream* stream) override {}
int file_size() { return file_->data_size(); }
@@ -240,10 +240,10 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
file_path = file_path.AppendASCII(file_name.c_str());
binary_file_ = base::OpenFile(file_path, "wb");
DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
- VLOG(0) << "Writing to file: " << file_path.value().c_str();
+ DVLOG(0) << "Writing to file: " << file_path.value().c_str();
}
- virtual ~FileAudioSink() {
+ ~FileAudioSink() override {
int bytes_written = 0;
while (bytes_written < buffer_->forward_capacity()) {
const uint8* chunk;
@@ -263,10 +263,10 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
}
// AudioInputStream::AudioInputCallback implementation.
- virtual void OnData(AudioInputStream* stream,
- const AudioBus* src,
- uint32 hardware_delay_bytes,
- double volume) override {
+ void OnData(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) override {
const int num_samples = src->frames() * src->channels();
scoped_ptr<int16> interleaved(new int16[num_samples]);
const int bytes_per_sample = sizeof(*interleaved);
@@ -280,7 +280,7 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
event_->Signal();
}
- virtual void OnError(AudioInputStream* stream) override {}
+ void OnError(AudioInputStream* stream) override {}
private:
base::WaitableEvent* event_;
@@ -308,13 +308,13 @@ class FullDuplexAudioSinkSource
buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
}
- virtual ~FullDuplexAudioSinkSource() {}
+ ~FullDuplexAudioSinkSource() override {}
// AudioInputStream::AudioInputCallback implementation
- virtual void OnData(AudioInputStream* stream,
- const AudioBus* src,
- uint32 hardware_delay_bytes,
- double volume) override {
+ void OnData(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) override {
const base::TimeTicks now_time = base::TimeTicks::Now();
const int diff = (now_time - previous_time_).InMilliseconds();
@@ -351,11 +351,10 @@ class FullDuplexAudioSinkSource
}
}
- virtual void OnError(AudioInputStream* stream) override {}
+ void OnError(AudioInputStream* stream) override {}
// AudioOutputStream::AudioSourceCallback implementation
- virtual int OnMoreData(AudioBus* dest,
- uint32 total_bytes_delay) override {
+ int OnMoreData(AudioBus* dest, uint32 total_bytes_delay) override {
const int size_in_bytes =
(params_.bits_per_sample() / 8) * dest->frames() * dest->channels();
EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer());
@@ -383,7 +382,7 @@ class FullDuplexAudioSinkSource
return dest->frames();
}
- virtual void OnError(AudioOutputStream* stream) override {}
+ void OnError(AudioOutputStream* stream) override {}
private:
// Converts from bytes to milliseconds given number of bytes and existing
@@ -414,8 +413,7 @@ class AudioAndroidOutputTest : public testing::Test {
audio_output_stream_(NULL) {
}
- virtual ~AudioAndroidOutputTest() {
- }
+ ~AudioAndroidOutputTest() override {}
protected:
AudioManager* audio_manager() { return audio_manager_.get(); }
@@ -512,10 +510,10 @@ class AudioAndroidOutputTest : public testing::Test {
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
- VLOG(0) << "expected time between callbacks: "
- << expected_time_between_callbacks_ms << " ms";
- VLOG(0) << "average time between callbacks: "
- << average_time_between_callbacks_ms << " ms";
+ DVLOG(0) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ DVLOG(0) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
@@ -666,10 +664,10 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
- VLOG(0) << "expected time between callbacks: "
- << expected_time_between_callbacks_ms << " ms";
- VLOG(0) << "average time between callbacks: "
- << average_time_between_callbacks_ms << " ms";
+ DVLOG(0) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ DVLOG(0) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
@@ -722,19 +720,18 @@ TEST_P(AudioAndroidInputTest, GetDefaultInputStreamParameters) {
// so that we can log the real (non-overridden) values of the effects.
GetDefaultInputStreamParametersOnAudioThread();
EXPECT_TRUE(audio_input_parameters().IsValid());
- VLOG(1) << audio_input_parameters();
+ DVLOG(1) << audio_input_parameters();
}
// Get the default audio output parameters and log the result.
TEST_F(AudioAndroidOutputTest, GetDefaultOutputStreamParameters) {
GetDefaultOutputStreamParametersOnAudioThread();
- VLOG(1) << audio_output_parameters();
+ DVLOG(1) << audio_output_parameters();
}
// Verify input device enumeration.
TEST_F(AudioAndroidInputTest, GetAudioInputDeviceNames) {
- if (!audio_manager()->HasAudioInputDevices())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(audio_manager()->HasAudioInputDevices());
AudioDeviceNames devices;
RunOnAudioThread(
base::Bind(&AudioManager::GetAudioInputDeviceNames,
@@ -745,8 +742,7 @@ TEST_F(AudioAndroidInputTest, GetAudioInputDeviceNames) {
// Verify output device enumeration.
TEST_F(AudioAndroidOutputTest, GetAudioOutputDeviceNames) {
- if (!audio_manager()->HasAudioOutputDevices())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(audio_manager()->HasAudioOutputDevices());
AudioDeviceNames devices;
RunOnAudioThread(
base::Bind(&AudioManager::GetAudioOutputDeviceNames,
@@ -840,7 +836,7 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
// automatized test on bots.
TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
GetDefaultOutputStreamParametersOnAudioThread();
- VLOG(1) << audio_output_parameters();
+ DVLOG(1) << audio_output_parameters();
MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name;
@@ -862,7 +858,7 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
FileAudioSource source(&event, file_name);
OpenAndStartAudioOutputStreamOnAudioThread(&source);
- VLOG(0) << ">> Verify that the file is played out correctly...";
+ DVLOG(0) << ">> Verify that the file is played out correctly...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
StopAndCloseAudioOutputStreamOnAudioThread();
}
@@ -873,7 +869,7 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
// automatized test on bots.
TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
AudioParameters params = GetInputStreamParameters();
- VLOG(1) << params;
+ DVLOG(1) << params;
MakeAudioInputStreamOnAudioThread(params);
std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
@@ -885,7 +881,7 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
FileAudioSink sink(&event, params, file_name);
OpenAndStartAudioInputStreamOnAudioThread(&sink);
- VLOG(0) << ">> Speak into the microphone to record audio...";
+ DVLOG(0) << ">> Speak into the microphone to record audio...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
StopAndCloseAudioInputStreamOnAudioThread();
}
@@ -896,11 +892,11 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
// automatized test on bots.
TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
AudioParameters in_params = GetInputStreamParameters();
- VLOG(1) << in_params;
+ DVLOG(1) << in_params;
MakeAudioInputStreamOnAudioThread(in_params);
GetDefaultOutputStreamParametersOnAudioThread();
- VLOG(1) << audio_output_parameters();
+ DVLOG(1) << audio_output_parameters();
MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
@@ -918,7 +914,7 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
OpenAndStartAudioInputStreamOnAudioThread(&sink);
OpenAndStartAudioOutputStreamOnAudioThread(&source);
- VLOG(0) << ">> Speak into the microphone to record audio";
+ DVLOG(0) << ">> Speak into the microphone to record audio";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
StopAndCloseAudioOutputStreamOnAudioThread();
StopAndCloseAudioInputStreamOnAudioThread();
@@ -947,7 +943,7 @@ TEST_P(AudioAndroidInputTest,
default_input_params.bits_per_sample(),
default_input_params.sample_rate() / 100,
default_input_params.effects());
- VLOG(1) << io_params;
+ DVLOG(1) << io_params;
// Create input and output streams using the common audio parameters.
MakeAudioInputStreamOnAudioThread(io_params);
@@ -961,9 +957,9 @@ TEST_P(AudioAndroidInputTest,
// something that has been added by the test.
OpenAndStartAudioInputStreamOnAudioThread(&full_duplex);
OpenAndStartAudioOutputStreamOnAudioThread(&full_duplex);
- VLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
- << "once per second during this test.";
- VLOG(0) << ">> Speak into the mic and listen to the audio in loopback...";
+ DVLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
+ << "once per second during this test.";
+ DVLOG(0) << ">> Speak into the mic and listen to the audio in loopback...";
fflush(stdout);
base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
printf("\n");
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 68dae3a4cce..0590ffcc144 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -48,7 +48,9 @@ AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
AudioManagerAndroid::AudioManagerAndroid(AudioLogFactory* audio_log_factory)
: AudioManagerBase(audio_log_factory),
- communication_mode_is_on_(false) {
+ communication_mode_is_on_(false),
+ output_volume_override_set_(false),
+ output_volume_override_(0) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
// WARNING: This is executed on the UI loop, do not add any code here which
@@ -174,8 +176,8 @@ AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
void AudioManagerAndroid::ReleaseOutputStream(AudioOutputStream* stream) {
DCHECK(GetTaskRunner()->BelongsToCurrentThread());
- AudioManagerBase::ReleaseOutputStream(stream);
streams_.erase(static_cast<OpenSLESOutputStream*>(stream));
+ AudioManagerBase::ReleaseOutputStream(stream);
}
void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
@@ -265,6 +267,22 @@ void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
muted));
}
+void AudioManagerAndroid::SetOutputVolumeOverride(double volume) {
+ GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioManagerAndroid::DoSetVolumeOnAudioThread,
+ base::Unretained(this),
+ volume));
+}
+
+bool AudioManagerAndroid::HasOutputVolumeOverride(double* out_volume) const {
+ if (output_volume_override_set_) {
+ *out_volume = output_volume_override_;
+ }
+ return output_volume_override_set_;
+}
+
AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
@@ -382,4 +400,15 @@ void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
}
}
+void AudioManagerAndroid::DoSetVolumeOnAudioThread(double volume) {
+ output_volume_override_set_ = true;
+ output_volume_override_ = volume;
+
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ for (OutputStreams::iterator it = streams_.begin();
+ it != streams_.end(); ++it) {
+ (*it)->SetVolume(volume);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index d1a8c8bf623..c64e6ee267d 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -23,34 +23,31 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
AudioManagerAndroid(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() override;
- virtual bool HasAudioInputDevices() override;
- virtual void GetAudioInputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual void GetAudioOutputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual AudioParameters GetInputStreamParameters(
+ bool HasAudioOutputDevices() override;
+ bool HasAudioInputDevices() override;
+ void GetAudioInputDeviceNames(AudioDeviceNames* device_names) override;
+ void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) override;
+ AudioParameters GetInputStreamParameters(
const std::string& device_id) override;
- virtual AudioOutputStream* MakeAudioOutputStream(
+ AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params,
- const std::string& device_id) override;
- virtual void ReleaseOutputStream(AudioOutputStream* stream) override;
- virtual void ReleaseInputStream(AudioInputStream* stream) override;
+ AudioInputStream* MakeAudioInputStream(const AudioParameters& params,
+ const std::string& device_id) override;
+ void ReleaseOutputStream(AudioOutputStream* stream) override;
+ void ReleaseInputStream(AudioInputStream* stream) override;
// Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
+ AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) override;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeLinearInputStream(
+ AudioInputStream* MakeLinearInputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeLowLatencyInputStream(
+ AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params,
const std::string& device_id) override;
@@ -58,10 +55,15 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
void SetMute(JNIEnv* env, jobject obj, jboolean muted);
+ // Sets a volume that applies to all this manager's output audio streams.
+ // This overrides other SetVolume calls (e.g. through AudioHostMsg_SetVolume).
+ void SetOutputVolumeOverride(double volume);
+ bool HasOutputVolumeOverride(double* out_volume) const;
+
protected:
- virtual ~AudioManagerAndroid();
+ ~AudioManagerAndroid() override;
- virtual AudioParameters GetPreferredOutputStreamParameters(
+ AudioParameters GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) override;
@@ -78,6 +80,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
int GetOptimalOutputFrameSize(int sample_rate, int channels);
void DoSetMuteOnAudioThread(bool muted);
+ void DoSetVolumeOnAudioThread(double volume);
// Java AudioManager instance.
base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
@@ -89,6 +92,10 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
// input stream is destroyed. Also affects the stream type of output streams.
bool communication_mode_is_on_;
+ // If set, overrides volume level on output streams
+ bool output_volume_override_set_;
+ double output_volume_override_;
+
DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
};
diff --git a/chromium/media/audio/android/audio_record_input.cc b/chromium/media/audio/android/audio_record_input.cc
index 1dd6118b184..92e0a0f46a5 100644
--- a/chromium/media/audio/android/audio_record_input.cc
+++ b/chromium/media/audio/android/audio_record_input.cc
@@ -125,8 +125,9 @@ double AudioRecordInputStream::GetVolume() {
return 0.0;
}
-void AudioRecordInputStream::SetAutomaticGainControl(bool enabled) {
+bool AudioRecordInputStream::SetAutomaticGainControl(bool enabled) {
NOTIMPLEMENTED();
+ return false;
}
bool AudioRecordInputStream::GetAutomaticGainControl() {
diff --git a/chromium/media/audio/android/audio_record_input.h b/chromium/media/audio/android/audio_record_input.h
index d4c4368c9f5..1a35fdd4810 100644
--- a/chromium/media/audio/android/audio_record_input.h
+++ b/chromium/media/audio/android/audio_record_input.h
@@ -28,19 +28,19 @@ class MEDIA_EXPORT AudioRecordInputStream : public AudioInputStream {
AudioRecordInputStream(AudioManagerAndroid* manager,
const AudioParameters& params);
- virtual ~AudioRecordInputStream();
+ ~AudioRecordInputStream() override;
// Implementation of AudioInputStream.
- virtual bool Open() override;
- virtual void Start(AudioInputCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
- virtual double GetMaxVolume() override;
- virtual void SetVolume(double volume) override;
- virtual double GetVolume() override;
- virtual void SetAutomaticGainControl(bool enabled) override;
- virtual bool GetAutomaticGainControl() override;
- virtual bool IsMuted() override;
+ bool Open() override;
+ void Start(AudioInputCallback* callback) override;
+ void Stop() override;
+ void Close() override;
+ double GetMaxVolume() override;
+ void SetVolume(double volume) override;
+ double GetVolume() override;
+ bool SetAutomaticGainControl(bool enabled) override;
+ bool GetAutomaticGainControl() override;
+ bool IsMuted() override;
static bool RegisterAudioRecordInput(JNIEnv* env);
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index d8e5f63d6fe..51588a3ebde 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -4,8 +4,8 @@
#include "media/audio/android/opensles_input.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/android/audio_manager_android.h"
#include "media/base/audio_bus.h"
@@ -176,8 +176,9 @@ double OpenSLESInputStream::GetVolume() {
return 0.0;
}
-void OpenSLESInputStream::SetAutomaticGainControl(bool enabled) {
+bool OpenSLESInputStream::SetAutomaticGainControl(bool enabled) {
NOTIMPLEMENTED();
+ return false;
}
bool OpenSLESInputStream::GetAutomaticGainControl() {
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
index ae62639d090..e4ec0824245 100644
--- a/chromium/media/audio/android/opensles_input.h
+++ b/chromium/media/audio/android/opensles_input.h
@@ -31,19 +31,19 @@ class OpenSLESInputStream : public AudioInputStream {
OpenSLESInputStream(AudioManagerAndroid* manager,
const AudioParameters& params);
- virtual ~OpenSLESInputStream();
+ ~OpenSLESInputStream() override;
// Implementation of AudioInputStream.
- virtual bool Open() override;
- virtual void Start(AudioInputCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
- virtual double GetMaxVolume() override;
- virtual void SetVolume(double volume) override;
- virtual double GetVolume() override;
- virtual void SetAutomaticGainControl(bool enabled) override;
- virtual bool GetAutomaticGainControl() override;
- virtual bool IsMuted() override;
+ bool Open() override;
+ void Start(AudioInputCallback* callback) override;
+ void Stop() override;
+ void Close() override;
+ double GetMaxVolume() override;
+ void SetVolume(double volume) override;
+ double GetVolume() override;
+ bool SetAutomaticGainControl(bool enabled) override;
+ bool GetAutomaticGainControl() override;
+ bool IsMuted() override;
private:
bool CreateRecorder();
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index 4ac0af2891b..2974e1418ca 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -4,8 +4,8 @@
#include "media/audio/android/opensles_output.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/android/audio_manager_android.h"
#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
@@ -165,6 +165,12 @@ void OpenSLESOutputStream::Close() {
void OpenSLESOutputStream::SetVolume(double volume) {
DVLOG(2) << "OpenSLESOutputStream::SetVolume(" << volume << ")";
DCHECK(thread_checker_.CalledOnValidThread());
+
+ double volume_override = 0;
+ if (audio_manager_->HasOutputVolumeOverride(&volume_override)) {
+ volume = volume_override;
+ }
+
float volume_float = static_cast<float>(volume);
if (volume_float < 0.0f || volume_float > 1.0f) {
return;
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index 914fa6e63c9..0fef1bdb231 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -31,15 +31,15 @@ class OpenSLESOutputStream : public AudioOutputStream {
const AudioParameters& params,
SLint32 stream_type);
- virtual ~OpenSLESOutputStream();
+ ~OpenSLESOutputStream() override;
// Implementation of AudioOutputStream.
- virtual bool Open() override;
- virtual void Close() override;
- virtual void Start(AudioSourceCallback* callback) override;
- virtual void Stop() override;
- virtual void SetVolume(double volume) override;
- virtual void GetVolume(double* volume) override;
+ bool Open() override;
+ void Close() override;
+ void Start(AudioSourceCallback* callback) override;
+ void Stop() override;
+ void SetVolume(double volume) override;
+ void GetVolume(double* volume) override;
// Set the value of |muted_|. It does not affect |volume_| which can be
// got by calling GetVolume(). See comments for |muted_| below.
diff --git a/chromium/media/audio/audio_device_thread.cc b/chromium/media/audio/audio_device_thread.cc
index 24ba5096eb4..a0f283ee563 100644
--- a/chromium/media/audio/audio_device_thread.cc
+++ b/chromium/media/audio/audio_device_thread.cc
@@ -115,7 +115,7 @@ void AudioDeviceThread::Thread::Start() {
AddRef();
PlatformThread::CreateWithPriority(0, this, &thread_,
- base::kThreadPriority_RealtimeAudio);
+ base::ThreadPriority::REALTIME_AUDIO);
CHECK(!thread_.is_null());
}
@@ -170,7 +170,14 @@ void AudioDeviceThread::Thread::Run() {
if (bytes_read != sizeof(pending_data))
break;
- {
+ // kuint32max is a special signal which is returned after the browser
+ // stops the output device in response to a renderer side request.
+ //
+ // Avoid running Process() for the paused signal, we still need to update
+ // the buffer index if |synchronized_buffers_| is true though.
+ //
+ // See comments in AudioOutputController::DoPause() for details on why.
+ if (pending_data != kuint32max) {
base::AutoLock auto_lock(callback_lock_);
if (callback_)
callback_->Process(pending_data);
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
index 44ecb29d56e..323de961581 100644
--- a/chromium/media/audio/audio_input_controller.cc
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -5,12 +5,13 @@
#include "media/audio/audio_input_controller.h"
#include "base/bind.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
+#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/scoped_histogram_timer.h"
#include "media/base/user_input_monitor.h"
using base::TimeDelta;
@@ -119,7 +120,7 @@ AudioInputController::AudioInputController(EventHandler* handler,
SyncWriter* sync_writer,
UserInputMonitor* user_input_monitor,
const bool agc_is_enabled)
- : creator_task_runner_(base::MessageLoopProxy::current()),
+ : creator_task_runner_(base::ThreadTaskRunnerHandle::Get()),
handler_(handler),
stream_(NULL),
data_is_active_(false),
@@ -329,7 +330,17 @@ void AudioInputController::DoCreateForStream(
// Set AGC state using mode in |agc_is_enabled_| which can only be enabled in
// CreateLowLatency().
+#if defined(AUDIO_POWER_MONITORING)
+ bool agc_is_supported = false;
+ agc_is_supported = stream_->SetAutomaticGainControl(agc_is_enabled_);
+ // Disable power measurements on platforms that does not support AGC at a
+ // lower level. AGC can fail on platforms where we don't support the
+ // functionality to modify the input volume slider. One such example is
+ // Windows XP.
+ power_measurement_is_enabled_ &= agc_is_supported;
+#else
stream_->SetAutomaticGainControl(agc_is_enabled_);
+#endif
// Create the data timer which will call FirstCheckForNoData(). The timer
// is started in DoRecord() and restarted in each DoCheckForNoData()
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index dab4b0a123a..c352f709fb7 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -4,7 +4,6 @@
#include "media/audio/audio_input_device.h"
-#include "base/basictypes.h"
#include "base/bind.h"
#include "base/memory/scoped_vector.h"
#include "base/threading/thread_restrictions.h"
@@ -60,9 +59,9 @@ AudioInputDevice::AudioInputDevice(
// The correctness of the code depends on the relative values assigned in the
// State enum.
- COMPILE_ASSERT(IPC_CLOSED < IDLE, invalid_enum_value_assignment_0);
- COMPILE_ASSERT(IDLE < CREATING_STREAM, invalid_enum_value_assignment_1);
- COMPILE_ASSERT(CREATING_STREAM < RECORDING, invalid_enum_value_assignment_2);
+ static_assert(IPC_CLOSED < IDLE, "invalid enum value assignment 0");
+ static_assert(IDLE < CREATING_STREAM, "invalid enum value assignment 1");
+ static_assert(CREATING_STREAM < RECORDING, "invalid enum value assignment 2");
}
void AudioInputDevice::Initialize(const AudioParameters& params,
@@ -290,7 +289,7 @@ void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
reinterpret_cast<media::AudioInputBuffer*>(ptr);
scoped_ptr<media::AudioBus> audio_bus =
media::AudioBus::WrapMemory(audio_parameters_, buffer->audio);
- audio_buses_.push_back(audio_bus.release());
+ audio_buses_.push_back(audio_bus.Pass());
ptr += segment_length_;
}
}
diff --git a/chromium/media/audio/audio_input_unittest.cc b/chromium/media/audio/audio_input_unittest.cc
index d959680f6de..97b7bf0c9df 100644
--- a/chromium/media/audio/audio_input_unittest.cc
+++ b/chromium/media/audio/audio_input_unittest.cc
@@ -12,6 +12,7 @@
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -58,12 +59,8 @@ class AudioInputTest : public testing::Test {
~AudioInputTest() override { base::RunLoop().RunUntilIdle(); }
protected:
- AudioManager* audio_manager() { return audio_manager_.get(); }
-
- bool CanRunAudioTests() {
- bool has_input = audio_manager()->HasAudioInputDevices();
- LOG_IF(WARNING, !has_input) << "No input devices detected";
- return has_input;
+ bool InputDevicesAvailable() {
+ return audio_manager_->HasAudioInputDevices();
}
void MakeAudioInputStreamOnAudioThread() {
@@ -106,29 +103,29 @@ class AudioInputTest : public testing::Test {
}
void MakeAudioInputStream() {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
- AudioParameters params = audio_manager()->GetInputStreamParameters(
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
+ AudioParameters params = audio_manager_->GetInputStreamParameters(
AudioManagerBase::kDefaultDeviceId);
- audio_input_stream_ = audio_manager()->MakeAudioInputStream(params,
+ audio_input_stream_ = audio_manager_->MakeAudioInputStream(params,
AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(audio_input_stream_);
}
void OpenAndClose() {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
EXPECT_TRUE(audio_input_stream_->Open());
audio_input_stream_->Close();
audio_input_stream_ = NULL;
}
void OpenAndStart(AudioInputStream::AudioInputCallback* sink) {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
EXPECT_TRUE(audio_input_stream_->Open());
audio_input_stream_->Start(sink);
}
void OpenStopAndClose() {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
EXPECT_TRUE(audio_input_stream_->Open());
audio_input_stream_->Stop();
audio_input_stream_->Close();
@@ -136,7 +133,7 @@ class AudioInputTest : public testing::Test {
}
void StopAndClose() {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_input_stream_->Stop();
audio_input_stream_->Close();
audio_input_stream_ = NULL;
@@ -144,9 +141,9 @@ class AudioInputTest : public testing::Test {
// Synchronously runs the provided callback/closure on the audio thread.
void RunOnAudioThread(const base::Closure& closure) {
- if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ if (!audio_manager_->GetTaskRunner()->BelongsToCurrentThread()) {
base::WaitableEvent event(false, false);
- audio_manager()->GetTaskRunner()->PostTask(
+ audio_manager_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioInputTest::RunOnAudioThreadImpl,
base::Unretained(this),
@@ -160,7 +157,7 @@ class AudioInputTest : public testing::Test {
void RunOnAudioThreadImpl(const base::Closure& closure,
base::WaitableEvent* event) {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
closure.Run();
event->Signal();
}
@@ -175,8 +172,7 @@ class AudioInputTest : public testing::Test {
// Test create and close of an AudioInputStream without recording audio.
TEST_F(AudioInputTest, CreateAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
MakeAudioInputStreamOnAudioThread();
CloseAudioInputStreamOnAudioThread();
}
@@ -189,8 +185,7 @@ TEST_F(AudioInputTest, CreateAndClose) {
#endif
// Test create, open and close of an AudioInputStream without recording audio.
TEST_F(AudioInputTest, MAYBE_OpenAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
MakeAudioInputStreamOnAudioThread();
OpenAndCloseAudioInputStreamOnAudioThread();
}
@@ -203,8 +198,7 @@ TEST_F(AudioInputTest, MAYBE_OpenAndClose) {
#endif
// Test create, open, stop and close of an AudioInputStream without recording.
TEST_F(AudioInputTest, MAYBE_OpenStopAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
MakeAudioInputStreamOnAudioThread();
OpenStopAndCloseAudioInputStreamOnAudioThread();
}
@@ -219,8 +213,7 @@ TEST_F(AudioInputTest, MAYBE_OpenStopAndClose) {
// Very simple test which starts capturing during half a second and verifies
// that recording starts.
TEST_F(AudioInputTest, MAYBE_Record) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
MakeAudioInputStreamOnAudioThread();
TestInputCallback test_callback;
diff --git a/chromium/media/audio/audio_input_volume_unittest.cc b/chromium/media/audio/audio_input_volume_unittest.cc
index a2213a3e92b..3fa1f40b907 100644
--- a/chromium/media/audio/audio_input_volume_unittest.cc
+++ b/chromium/media/audio/audio_input_volume_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
@@ -37,25 +38,14 @@ double GetVolumeAfterSetVolumeOnLinux(AudioInputStream* ais,
class AudioInputVolumeTest : public ::testing::Test {
protected:
- AudioInputVolumeTest()
- : audio_manager_(AudioManager::CreateForTesting())
-#if defined(OS_WIN)
- , com_init_(base::win::ScopedCOMInitializer::kMTA)
-#endif
- {
- }
+ AudioInputVolumeTest() : audio_manager_(AudioManager::CreateForTesting()) {}
- bool CanRunAudioTests() {
+ bool HasCoreAudioAndInputDevices() {
#if defined(OS_WIN)
// TODO(henrika): add support for volume control on Windows XP as well.
- // For now, we might as well signal false already here to avoid running
- // these tests on Windows XP.
if (!CoreAudioUtil::IsSupported())
return false;
#endif
- if (!audio_manager_)
- return false;
-
return audio_manager_->HasAudioInputDevices();
}
@@ -91,10 +81,6 @@ class AudioInputVolumeTest : public ::testing::Test {
}
scoped_ptr<AudioManager> audio_manager_;
-
-#if defined(OS_WIN)
- base::win::ScopedCOMInitializer com_init_;
-#endif
};
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
@@ -106,8 +92,7 @@ class AudioInputVolumeTest : public ::testing::Test {
#endif
TEST_F(AudioInputVolumeTest, MAYBE_InputVolumeTest) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices());
// Retrieve a list of all available input devices.
AudioDeviceNames device_names;
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
index 3add053952c..56392154e32 100644
--- a/chromium/media/audio/audio_io.h
+++ b/chromium/media/audio/audio_io.h
@@ -56,6 +56,8 @@ class MEDIA_EXPORT AudioOutputStream {
// itself such as creating Windows or initializing COM.
class MEDIA_EXPORT AudioSourceCallback {
public:
+ virtual ~AudioSourceCallback() {}
+
// Provide more data by fully filling |dest|. The source will return
// the number of frames it filled. |total_bytes_delay| contains current
// number of bytes of delay buffered by the AudioOutputStream.
@@ -66,9 +68,6 @@ class MEDIA_EXPORT AudioOutputStream {
// a good place to stop accumulating sound data since is is likely that
// playback will not continue.
virtual void OnError(AudioOutputStream* stream) = 0;
-
- protected:
- virtual ~AudioSourceCallback() {}
};
virtual ~AudioOutputStream() {}
@@ -160,7 +159,7 @@ class MEDIA_EXPORT AudioInputStream {
virtual double GetVolume() = 0;
// Sets the Automatic Gain Control (AGC) state.
- virtual void SetAutomaticGainControl(bool enabled) = 0;
+ virtual bool SetAutomaticGainControl(bool enabled) = 0;
// Returns the Automatic Gain Control (AGC) state.
virtual bool GetAutomaticGainControl() = 0;
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
index 7d8fc0a68ae..26f9086c3be 100644
--- a/chromium/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -13,6 +13,7 @@
#include "build/build_config.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/fake_audio_log_factory.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -114,16 +115,6 @@ class AudioLowLatencyInputOutputTest : public testing::Test {
AudioManager* audio_manager() { return &mock_audio_manager_; }
base::MessageLoopForUI* message_loop() { return &message_loop_; }
- // Convenience method which ensures that we are not running on the build
- // bots and that at least one valid input and output device can be found.
- bool CanRunAudioTests() {
- bool input = audio_manager()->HasAudioInputDevices();
- bool output = audio_manager()->HasAudioOutputDevices();
- LOG_IF(WARNING, !input) << "No input device detected.";
- LOG_IF(WARNING, !output) << "No output device detected.";
- return input && output;
- }
-
private:
base::MessageLoopForUI message_loop_;
MockAudioManager mock_audio_manager_;
@@ -384,8 +375,8 @@ typedef StreamWrapper<AudioOutputStreamTraits> AudioOutputStreamWrapper;
// ylabel('delay [msec]')
// title('Full-duplex audio delay measurement');
TEST_F(AudioLowLatencyInputOutputTest, DISABLED_FullDuplexDelayMeasurement) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(audio_manager()->HasAudioInputDevices() &&
+ audio_manager()->HasAudioOutputDevices());
AudioInputStreamWrapper aisw(audio_manager());
AudioInputStream* ais = aisw.Create();
diff --git a/chromium/media/audio/audio_manager.cc b/chromium/media/audio/audio_manager.cc
index 6d5d70d087c..ed559f9fd93 100644
--- a/chromium/media/audio/audio_manager.cc
+++ b/chromium/media/audio/audio_manager.cc
@@ -6,17 +6,154 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/debug/alias.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/power_monitor/power_monitor.h"
+#include "build/build_config.h"
+#include "media/audio/audio_manager_factory.h"
#include "media/audio/fake_audio_log_factory.h"
+#include "media/base/media_switches.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#endif
namespace media {
namespace {
+
+// The singleton instance of AudioManager. This is set when Create() is called.
AudioManager* g_last_created = NULL;
-static base::LazyInstance<FakeAudioLogFactory>::Leaky g_fake_log_factory =
+
+// The singleton instance of AudioManagerFactory. This is only set if
+// SetFactory() is called. If it is set when Create() is called, its
+// CreateInstance() function is used to set |g_last_created|. Otherwise, the
+// linked implementation of media::CreateAudioManager is used to set
+// |g_last_created|.
+AudioManagerFactory* g_audio_manager_factory = NULL;
+
+// Maximum number of failed pings to the audio thread allowed. A crash will be
+// issued once this count is reached. We require at least two pings before
+// crashing to ensure unobservable power events aren't mistakenly caught (e.g.,
+// the system suspends before a OnSuspend() event can be fired.).
+const int kMaxHangFailureCount = 2;
+
+// Helper class for managing global AudioManager data and hang timers. If the
+// audio thread is unresponsive for more than two minutes we want to crash the
+// process so we can catch offenders quickly in the field.
+class AudioManagerHelper : public base::PowerObserver {
+ public:
+ AudioManagerHelper()
+ : max_hung_task_time_(base::TimeDelta::FromMinutes(1)),
+ hang_detection_enabled_(true) {}
+ ~AudioManagerHelper() override {}
+
+ void StartHangTimer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& monitor_task_runner) {
+ CHECK(!monitor_task_runner_);
+ monitor_task_runner_ = monitor_task_runner;
+ base::PowerMonitor::Get()->AddObserver(this);
+ hang_failures_ = 0;
+ UpdateLastAudioThreadTimeTick();
+ CrashOnAudioThreadHang();
+ }
+
+ // Disable hang detection when the system goes into the suspend state.
+ void OnSuspend() override {
+ base::AutoLock lock(hang_lock_);
+ hang_detection_enabled_ = false;
+ hang_failures_ = 0;
+ }
+
+ // Reenable hang detection once the system comes out of the suspend state.
+ void OnResume() override {
+ base::AutoLock lock(hang_lock_);
+ hang_detection_enabled_ = true;
+ last_audio_thread_timer_tick_ = base::TimeTicks::Now();
+ hang_failures_ = 0;
+ }
+
+ // Runs on |monitor_task_runner| typically, but may be started on any thread.
+ void CrashOnAudioThreadHang() {
+ {
+ base::AutoLock lock(hang_lock_);
+
+ // Don't attempt to verify the tick time if the system is in the process
+ // of suspending or resuming.
+ if (hang_detection_enabled_) {
+ const base::TimeTicks now = base::TimeTicks::Now();
+ const base::TimeDelta tick_delta = now - last_audio_thread_timer_tick_;
+ if (tick_delta > max_hung_task_time_) {
+ if (++hang_failures_ >= kMaxHangFailureCount) {
+ base::debug::Alias(&now);
+ base::debug::Alias(&tick_delta);
+ base::debug::Alias(&last_audio_thread_timer_tick_);
+ CHECK(false);
+ }
+ } else {
+ hang_failures_ = 0;
+ }
+ }
+ }
+
+ // Don't hold the lock while posting the next task.
+ monitor_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&AudioManagerHelper::CrashOnAudioThreadHang,
+ base::Unretained(this)),
+ max_hung_task_time_);
+ }
+
+ // Runs on the audio thread typically, but may be started on any thread.
+ void UpdateLastAudioThreadTimeTick() {
+ {
+ base::AutoLock lock(hang_lock_);
+ last_audio_thread_timer_tick_ = base::TimeTicks::Now();
+ hang_failures_ = 0;
+ }
+
+ // Don't hold the lock while posting the next task.
+ g_last_created->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AudioManagerHelper::UpdateLastAudioThreadTimeTick,
+ base::Unretained(this)),
+ max_hung_task_time_ / 10);
+ }
+
+ AudioLogFactory* fake_log_factory() { return &fake_log_factory_; }
+
+#if defined(OS_WIN)
+ // This should be called before creating an AudioManager in tests to ensure
+ // that the creating thread is COM initialized.
+ void InitializeCOMForTesting() {
+ com_initializer_for_testing_.reset(new base::win::ScopedCOMInitializer());
+ }
+#endif
+
+ private:
+ FakeAudioLogFactory fake_log_factory_;
+
+ const base::TimeDelta max_hung_task_time_;
+ scoped_refptr<base::SingleThreadTaskRunner> monitor_task_runner_;
+
+ base::Lock hang_lock_;
+ bool hang_detection_enabled_;
+ base::TimeTicks last_audio_thread_timer_tick_;
+ int hang_failures_;
+
+#if defined(OS_WIN)
+ scoped_ptr<base::win::ScopedCOMInitializer> com_initializer_for_testing_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerHelper);
+};
+
+static bool g_hang_monitor_enabled = false;
+
+static base::LazyInstance<AudioManagerHelper>::Leaky g_helper =
LAZY_INSTANCE_INITIALIZER;
-}
+} // namespace
// Forward declaration of the platform specific AudioManager factory function.
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory);
@@ -29,15 +166,62 @@ AudioManager::~AudioManager() {
}
// static
+void AudioManager::SetFactory(AudioManagerFactory* factory) {
+ CHECK(factory);
+ CHECK(!g_last_created);
+ CHECK(!g_audio_manager_factory);
+ g_audio_manager_factory = factory;
+}
+
+// static
+void AudioManager::ResetFactoryForTesting() {
+ if (g_audio_manager_factory) {
+ delete g_audio_manager_factory;
+ g_audio_manager_factory = nullptr;
+ }
+}
+
+// static
AudioManager* AudioManager::Create(AudioLogFactory* audio_log_factory) {
CHECK(!g_last_created);
- g_last_created = CreateAudioManager(audio_log_factory);
+ if (g_audio_manager_factory)
+ g_last_created = g_audio_manager_factory->CreateInstance(audio_log_factory);
+ else
+ g_last_created = CreateAudioManager(audio_log_factory);
+
return g_last_created;
}
// static
+AudioManager* AudioManager::CreateWithHangTimer(
+ AudioLogFactory* audio_log_factory,
+ const scoped_refptr<base::SingleThreadTaskRunner>& monitor_task_runner) {
+ AudioManager* manager = Create(audio_log_factory);
+ if (g_hang_monitor_enabled ||
+ base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableAudioHangMonitor)) {
+ g_helper.Pointer()->StartHangTimer(monitor_task_runner);
+ }
+ return manager;
+}
+
+// static
AudioManager* AudioManager::CreateForTesting() {
- return Create(g_fake_log_factory.Pointer());
+#if defined(OS_WIN)
+ g_helper.Pointer()->InitializeCOMForTesting();
+#endif
+ return Create(g_helper.Pointer()->fake_log_factory());
+}
+
+// static
+void AudioManager::EnableHangMonitor() {
+ CHECK(!g_last_created);
+// On OSX the audio thread is the UI thread, for which a hang monitor is not
+// necessary or recommended. If it's manually requested, we should allow it
+// to start though.
+#if !defined(OS_MACOSX)
+ g_hang_monitor_enabled = true;
+#endif
}
// static
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index 7705cdb804e..1391742fe5f 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -21,22 +21,50 @@ class SingleThreadTaskRunner;
namespace media {
class AudioInputStream;
+class AudioManagerFactory;
class AudioOutputStream;
// Manages all audio resources. Provides some convenience functions that avoid
// the need to provide iterators over the existing streams.
class MEDIA_EXPORT AudioManager {
- public:
- virtual ~AudioManager();
+ public:
+ virtual ~AudioManager();
+
+ // This provides an alternative to the statically linked factory method used
+ // to create AudioManager. This is useful for dynamically-linked third
+ // party clients seeking to provide a platform-specific implementation of
+ // AudioManager. After this is called, all static AudioManager::Create*
+ // methods will return an instance of AudioManager provided by |factory|. This
+ // call may be made at most once per process, and before any calls to static
+ // AudioManager::Create* methods. This method takes ownership of |factory|,
+ // which must not be NULL.
+ static void SetFactory(AudioManagerFactory* factory);
// Construct the audio manager; only one instance is allowed. The manager
// will forward CreateAudioLog() calls to the provided AudioLogFactory; as
// such |audio_log_factory| must outlive the AudioManager.
static AudioManager* Create(AudioLogFactory* audio_log_factory);
+ // Similar to Create() except also schedules a monitor on the given task
+ // runner to ensure the audio thread is not stuck for more than 60 seconds; if
+ // a hang is detected, the process will be crashed. See EnableHangMonitor().
+ static AudioManager* CreateWithHangTimer(
+ AudioLogFactory* audio_log_factory,
+ const scoped_refptr<base::SingleThreadTaskRunner>& monitor_task_runner);
+
// Similar to Create() except uses a FakeAudioLogFactory for testing.
static AudioManager* CreateForTesting();
+ // Enables the hang monitor for the AudioManager once it's created. Must be
+ // called before the AudioManager is created. CreateWithHangTimer() requires
+ // either switches::kEnableAudioHangMonitor to be present or this to have been
+ // called previously to start the hang monitor. Does nothing on OSX.
+ static void EnableHangMonitor();
+
+ // Should only be used for testing. Resets a previously-set
+ // AudioManagerFactory. The instance of AudioManager is not affected.
+ static void ResetFactoryForTesting();
+
// Returns the pointer to the last created instance, or NULL if not yet
// created. This is a utility method for the code outside of media directory,
// like src/chrome.
@@ -123,7 +151,8 @@ class MEDIA_EXPORT AudioManager {
// Do not free the returned AudioInputStream. It is owned by AudioManager.
// When you are done with it, call |Stop()| and |Close()| to release it.
virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id) = 0;
// Returns the task runner used for audio IO.
virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index 275ec33bc03..fe7c12e35d5 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -7,7 +7,9 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
+#include "base/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_output_proxy.h"
@@ -90,7 +92,7 @@ AudioManagerBase::AudioManagerBase(AudioLogFactory* audio_log_factory)
// thread leads to crashes and odd behavior. See http://crbug.com/158170.
// TODO(dalecurtis): We should require the message loop to be passed in.
if (base::MessageLoopForUI::IsCurrent()) {
- task_runner_ = base::MessageLoopProxy::current();
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
return;
}
#endif
@@ -198,6 +200,9 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
return NULL;
}
+ DVLOG(2) << "Creating a new AudioInputStream with buffer size = "
+ << params.frames_per_buffer();
+
AudioInputStream* stream;
switch (params.format()) {
case AudioParameters::AUDIO_PCM_LINEAR:
@@ -385,7 +390,7 @@ std::string AudioManagerBase::GetDefaultOutputDeviceID() {
}
int AudioManagerBase::GetUserBufferSize() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
int buffer_size = 0;
std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
switches::kAudioBufferSize));
diff --git a/chromium/media/audio/audio_manager_factory.h b/chromium/media/audio/audio_manager_factory.h
new file mode 100644
index 00000000000..11f1338d0ba
--- /dev/null
+++ b/chromium/media/audio/audio_manager_factory.h
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_MANAGER_FACTORY_H_
+#define MEDIA_AUDIO_AUDIO_MANAGER_FACTORY_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioManager;
+class AudioLogFactory;
+
+// Allows a platform-specific implementation of AudioManager to be provided in
+// place of the default implementation at run-time.
+class MEDIA_EXPORT AudioManagerFactory {
+ public:
+ virtual ~AudioManagerFactory() {}
+
+ // Creates an instance of AudioManager implementation. Caller owns the
+ // returned instance. |audio_log_factory| must outlive the returned instance.
+ virtual AudioManager* CreateInstance(AudioLogFactory* audio_log_factory) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_MANAGER_FACTORY_H_
diff --git a/chromium/media/audio/audio_manager_factory_unittest.cc b/chromium/media/audio/audio_manager_factory_unittest.cc
new file mode 100644
index 00000000000..1d13c2e0269
--- /dev/null
+++ b/chromium/media/audio/audio_manager_factory_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_manager_factory.h"
+#include "media/audio/fake_audio_log_factory.h"
+#include "media/audio/fake_audio_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace {
+
+class FakeAudioManagerFactory : public AudioManagerFactory {
+ public:
+ FakeAudioManagerFactory() {}
+ ~FakeAudioManagerFactory() override {}
+
+ AudioManager* CreateInstance(AudioLogFactory* audio_log_factory) override {
+ // |created_instance_| is used for verifying. Ownership is transferred to
+ // caller.
+ created_instance_ = new FakeAudioManager(audio_log_factory);
+ return created_instance_;
+ }
+
+ AudioManager* created_instance() { return created_instance_; }
+
+ private:
+ AudioManager* created_instance_;
+};
+
+} // namespace
+
+// Verifies that SetFactory has the intended effect.
+TEST(AudioManagerFactoryTest, CreateInstance) {
+ // Create an audio manager and verify that it is not null.
+ scoped_ptr<AudioManager> manager(AudioManager::CreateForTesting());
+ ASSERT_NE(nullptr, manager.get());
+ manager.reset();
+
+ // Set the factory. Note that ownership of |factory| is transferred to
+ // AudioManager.
+ FakeAudioManagerFactory* factory = new FakeAudioManagerFactory();
+ AudioManager::SetFactory(factory);
+
+ // Create the AudioManager instance. Verify that it matches the instance
+ // provided by the factory.
+ manager.reset(AudioManager::CreateForTesting());
+ ASSERT_NE(nullptr, manager.get());
+ ASSERT_EQ(factory->created_instance(), manager.get());
+
+ // Reset AudioManagerFactory to prevent factory from persisting to other
+ // tests on the same process. |manager| will reset when scope exits.
+ AudioManager::ResetFactoryForTesting();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index eafdaab1a8c..48bb51cee5c 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -9,6 +9,7 @@
#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/fake_audio_log_factory.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -32,12 +33,7 @@ namespace media {
// Windows.
class AudioManagerTest : public ::testing::Test {
protected:
- AudioManagerTest()
- : audio_manager_(AudioManager::CreateForTesting())
-#if defined(OS_WIN)
- , com_init_(base::win::ScopedCOMInitializer::kMTA)
-#endif
- {
+ AudioManagerTest() : audio_manager_(AudioManager::CreateForTesting()) {
// Wait for audio thread initialization to complete. Otherwise the
// enumeration type may not have been set yet.
base::WaitableEvent event(false, false);
@@ -46,8 +42,6 @@ class AudioManagerTest : public ::testing::Test {
event.Wait();
}
- AudioManager* audio_manager() { return audio_manager_.get(); };
-
#if defined(OS_WIN)
bool SetMMDeviceEnumeration() {
AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
@@ -82,7 +76,7 @@ class AudioManagerTest : public ::testing::Test {
// Helper method which verifies that the device list starts with a valid
// default record followed by non-default device names.
static void CheckDeviceNames(const AudioDeviceNames& device_names) {
- VLOG(2) << "Got " << device_names.size() << " audio devices.";
+ DVLOG(2) << "Got " << device_names.size() << " audio devices.";
if (!device_names.empty()) {
AudioDeviceNames::const_iterator it = device_names.begin();
@@ -97,8 +91,8 @@ class AudioManagerTest : public ::testing::Test {
while (it != device_names.end()) {
EXPECT_FALSE(it->device_name.empty());
EXPECT_FALSE(it->unique_id.empty());
- VLOG(2) << "Device ID(" << it->unique_id
- << "), label: " << it->device_name;
+ DVLOG(2) << "Device ID(" << it->unique_id
+ << "), label: " << it->device_name;
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
it->device_name);
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
@@ -113,11 +107,11 @@ class AudioManagerTest : public ::testing::Test {
}
}
- bool CanRunInputTest() {
+ bool InputDevicesAvailable() {
return audio_manager_->HasAudioInputDevices();
}
- bool CanRunOutputTest() {
+ bool OutputDevicesAvailable() {
return audio_manager_->HasAudioOutputDevices();
}
@@ -133,7 +127,7 @@ class AudioManagerTest : public ::testing::Test {
// Synchronously runs the provided callback/closure on the audio thread.
void RunOnAudioThread(const base::Closure& closure) {
- if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ if (!audio_manager_->GetTaskRunner()->BelongsToCurrentThread()) {
base::WaitableEvent event(false, false);
audio_manager_->GetTaskRunner()->PostTask(
FROM_HERE,
@@ -149,42 +143,35 @@ class AudioManagerTest : public ::testing::Test {
void RunOnAudioThreadImpl(const base::Closure& closure,
base::WaitableEvent* event) {
- DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
closure.Run();
event->Signal();
}
FakeAudioLogFactory fake_audio_log_factory_;
scoped_ptr<AudioManager> audio_manager_;
-
-#if defined(OS_WIN)
- // The MMDevice API requires COM to be initialized on the current thread.
- base::win::ScopedCOMInitializer com_init_;
-#endif
};
// Test that devices can be enumerated.
TEST_F(AudioManagerTest, EnumerateInputDevices) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioDeviceNames device_names;
RunOnAudioThread(
base::Bind(&AudioManager::GetAudioInputDeviceNames,
- base::Unretained(audio_manager()),
+ base::Unretained(audio_manager_.get()),
&device_names));
CheckDeviceNames(device_names);
}
// Test that devices can be enumerated.
TEST_F(AudioManagerTest, EnumerateOutputDevices) {
- if (!CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
AudioDeviceNames device_names;
RunOnAudioThread(
base::Bind(&AudioManager::GetAudioOutputDeviceNames,
- base::Unretained(audio_manager()),
+ base::Unretained(audio_manager_.get()),
&device_names));
CheckDeviceNames(device_names);
}
@@ -197,8 +184,7 @@ TEST_F(AudioManagerTest, EnumerateOutputDevices) {
// Override default enumeration API and force usage of Windows MMDevice.
// This test will only run on Windows Vista and higher.
TEST_F(AudioManagerTest, EnumerateInputDevicesWinMMDevice) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioDeviceNames device_names;
if (!SetMMDeviceEnumeration()) {
@@ -211,8 +197,7 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesWinMMDevice) {
}
TEST_F(AudioManagerTest, EnumerateOutputDevicesWinMMDevice) {
- if (!CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
AudioDeviceNames device_names;
if (!SetMMDeviceEnumeration()) {
@@ -227,8 +212,7 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesWinMMDevice) {
// Override default enumeration API and force usage of Windows Wave.
// This test will run on Windows XP, Windows Vista and Windows 7.
TEST_F(AudioManagerTest, EnumerateInputDevicesWinWave) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioDeviceNames device_names;
SetWaveEnumeration();
@@ -237,8 +221,7 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesWinWave) {
}
TEST_F(AudioManagerTest, EnumerateOutputDevicesWinWave) {
- if (!CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
AudioDeviceNames device_names;
SetWaveEnumeration();
@@ -247,8 +230,7 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesWinWave) {
}
TEST_F(AudioManagerTest, WinXPDeviceIdUnchanged) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioDeviceNames xp_device_names;
SetWaveEnumeration();
@@ -264,8 +246,7 @@ TEST_F(AudioManagerTest, WinXPDeviceIdUnchanged) {
}
TEST_F(AudioManagerTest, ConvertToWinXPInputDeviceId) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
if (!SetMMDeviceEnumeration()) {
// Usage of MMDevice will fail on XP and lower.
@@ -301,8 +282,7 @@ TEST_F(AudioManagerTest, ConvertToWinXPInputDeviceId) {
// test Pulseaudio.
TEST_F(AudioManagerTest, EnumerateInputDevicesPulseaudio) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
CreateAudioManagerForTesting<AudioManagerPulse>();
if (audio_manager_.get()) {
@@ -315,8 +295,7 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesPulseaudio) {
}
TEST_F(AudioManagerTest, EnumerateOutputDevicesPulseaudio) {
- if (!CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
CreateAudioManagerForTesting<AudioManagerPulse>();
if (audio_manager_.get()) {
@@ -335,10 +314,9 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesPulseaudio) {
// test Alsa.
TEST_F(AudioManagerTest, EnumerateInputDevicesAlsa) {
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
- VLOG(2) << "Testing AudioManagerAlsa.";
+ DVLOG(2) << "Testing AudioManagerAlsa.";
CreateAudioManagerForTesting<AudioManagerAlsa>();
AudioDeviceNames device_names;
audio_manager_->GetAudioInputDeviceNames(&device_names);
@@ -346,10 +324,9 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesAlsa) {
}
TEST_F(AudioManagerTest, EnumerateOutputDevicesAlsa) {
- if (!CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
- VLOG(2) << "Testing AudioManagerAlsa.";
+ DVLOG(2) << "Testing AudioManagerAlsa.";
CreateAudioManagerForTesting<AudioManagerAlsa>();
AudioDeviceNames device_names;
audio_manager_->GetAudioOutputDeviceNames(&device_names);
@@ -359,8 +336,7 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesAlsa) {
TEST_F(AudioManagerTest, GetDefaultOutputStreamParameters) {
#if defined(OS_WIN) || defined(OS_MACOSX)
- if (!CanRunInputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioParameters params = audio_manager_->GetDefaultOutputStreamParameters();
EXPECT_TRUE(params.IsValid());
@@ -369,8 +345,7 @@ TEST_F(AudioManagerTest, GetDefaultOutputStreamParameters) {
TEST_F(AudioManagerTest, GetAssociatedOutputDeviceID) {
#if defined(OS_WIN) || defined(OS_MACOSX)
- if (!CanRunInputTest() || !CanRunOutputTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable() && OutputDevicesAvailable());
AudioDeviceNames device_names;
audio_manager_->GetAudioInputDeviceNames(&device_names);
@@ -383,7 +358,7 @@ TEST_F(AudioManagerTest, GetAssociatedOutputDeviceID) {
std::string output_device_id(
audio_manager_->GetAssociatedOutputDeviceID(it->unique_id));
if (!output_device_id.empty()) {
- VLOG(2) << it->unique_id << " matches with " << output_device_id;
+ DVLOG(2) << it->unique_id << " matches with " << output_device_id;
found_an_associated_device = true;
}
}
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index a38bc526f29..8af0aabb246 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -5,14 +5,12 @@
#include "media/audio/audio_output_controller.h"
#include "base/bind.h"
-#include "base/debug/trace_event.h"
-#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/task_runner_util.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
-#include "build/build_config.h"
-#include "media/base/scoped_histogram_timer.h"
+#include "base/trace_event/trace_event.h"
using base::TimeDelta;
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index 23dc897d6ef..298c56576dc 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -67,7 +67,6 @@ class MEDIA_EXPORT AudioOutputController
virtual void OnPlaying() = 0;
virtual void OnPaused() = 0;
virtual void OnError() = 0;
- virtual void OnDeviceChange(int new_buffer_size, int new_sample_rate) = 0;
protected:
virtual ~EventHandler() {}
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index d15889886a4..3e9229da9ba 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -41,7 +41,6 @@ class MockAudioOutputControllerEventHandler
MOCK_METHOD0(OnPlaying, void());
MOCK_METHOD0(OnPaused, void());
MOCK_METHOD0(OnError, void());
- MOCK_METHOD2(OnDeviceChange, void(int new_buffer_size, int new_sample_rate));
private:
DISALLOW_COPY_AND_ASSIGN(MockAudioOutputControllerEventHandler);
@@ -189,7 +188,7 @@ class AudioOutputControllerTest : public testing::Test {
void ReadDivertedAudioData() {
scoped_ptr<AudioBus> dest = AudioBus::Create(params_);
- ASSERT_TRUE(!!mock_stream_.callback());
+ ASSERT_TRUE(mock_stream_.callback());
const int frames_read =
mock_stream_.callback()->OnMoreData(dest.get(), 0);
EXPECT_LT(0, frames_read);
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index c804ae7690e..dc83328ca55 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -4,10 +4,9 @@
#include "media/audio/audio_output_device.h"
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/audio_output_controller.h"
#include "media/base/limits.h"
@@ -33,6 +32,7 @@ class AudioOutputDevice::AudioThreadCallback
private:
AudioRendererSink::RenderCallback* render_callback_;
scoped_ptr<AudioBus> output_bus_;
+ uint64 callback_num_;
DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
};
@@ -50,10 +50,10 @@ AudioOutputDevice::AudioOutputDevice(
// The correctness of the code depends on the relative values assigned in the
// State enum.
- COMPILE_ASSERT(IPC_CLOSED < IDLE, invalid_enum_value_assignment_0);
- COMPILE_ASSERT(IDLE < CREATING_STREAM, invalid_enum_value_assignment_1);
- COMPILE_ASSERT(CREATING_STREAM < PAUSED, invalid_enum_value_assignment_2);
- COMPILE_ASSERT(PAUSED < PLAYING, invalid_enum_value_assignment_3);
+ static_assert(IPC_CLOSED < IDLE, "invalid enum value assignment 0");
+ static_assert(IDLE < CREATING_STREAM, "invalid enum value assignment 1");
+ static_assert(CREATING_STREAM < PAUSED, "invalid enum value assignment 2");
+ static_assert(PAUSED < PLAYING, "invalid enum value assignment 3");
}
void AudioOutputDevice::InitializeWithSessionId(const AudioParameters& params,
@@ -128,6 +128,8 @@ void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
void AudioOutputDevice::PlayOnIOThread() {
DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PAUSED) {
+ TRACE_EVENT_ASYNC_BEGIN0(
+ "audio", "StartingPlayback", audio_callback_.get());
ipc_->PlayStream();
state_ = PLAYING;
play_on_start_ = false;
@@ -139,6 +141,8 @@ void AudioOutputDevice::PlayOnIOThread() {
void AudioOutputDevice::PauseOnIOThread() {
DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PLAYING) {
+ TRACE_EVENT_ASYNC_END0(
+ "audio", "StartingPlayback", audio_callback_.get());
ipc_->PauseStream();
state_ = PAUSED;
}
@@ -270,7 +274,8 @@ AudioOutputDevice::AudioThreadCallback::AudioThreadCallback(
int memory_length,
AudioRendererSink::RenderCallback* render_callback)
: AudioDeviceThread::Callback(audio_parameters, memory, memory_length, 1),
- render_callback_(render_callback) {}
+ render_callback_(render_callback),
+ callback_num_(0) {}
AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
}
@@ -289,7 +294,16 @@ void AudioOutputDevice::AudioThreadCallback::Process(uint32 pending_data) {
// Convert the number of pending bytes in the render buffer into milliseconds.
int audio_delay_milliseconds = pending_data / bytes_per_ms_;
- TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback");
+ callback_num_++;
+ TRACE_EVENT1("audio", "AudioOutputDevice::FireRenderCallback",
+ "callback_num", callback_num_);
+
+ // When playback starts, we get an immediate callback to Process to make sure
+ // that we have some data, we'll get another one after the device is awake and
+ // ingesting data, which is what we want to track with this trace.
+ if (callback_num_ == 2) {
+ TRACE_EVENT_ASYNC_END0("audio", "StartingPlayback", this);
+ }
// Update the audio-delay measurement then ask client to render audio. Since
// |output_bus_| is wrapping the shared memory the Render() call is writing
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 0cb3db85cad..a2d9fc214dd 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -10,7 +10,6 @@
#include "base/compiler_specific.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
-#include "media/audio/audio_io.h"
#include "media/audio/audio_output_proxy.h"
namespace media {
@@ -130,6 +129,10 @@ void AudioOutputDispatcherImpl::Shutdown() {
DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
}
+bool AudioOutputDispatcherImpl::HasOutputProxies() const {
+ return idle_proxies_ || !proxy_to_physical_map_.empty();
+}
+
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
DCHECK(task_runner_->BelongsToCurrentThread());
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index 1aa5a32dd59..d27178458fe 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -60,6 +60,9 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
void Shutdown() override;
+ // Returns true if there are any open AudioOutputProxy objects.
+ bool HasOutputProxies() const;
+
private:
friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
~AudioOutputDispatcherImpl() override;
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index 66f8987a0aa..b0750cc0a88 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -674,4 +674,57 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
EXPECT_TRUE(stream2.start_called());
}
+// Simulate failures to open both the low latency and the fallback high latency
+// stream and ensure AudioOutputResampler falls back to a fake stream. Ensure
+// that after the close delay elapses, opening another stream succeeds with a
+// non-fake stream.
+TEST_F(AudioOutputResamplerTest, FallbackRecovery) {
+ MockAudioOutputStream fake_stream(&manager_, params_);
+
+ // Trigger the fallback mechanism until a fake output stream is created.
+#if defined(OS_WIN)
+ static const int kFallbackCount = 2;
+#else
+ static const int kFallbackCount = 1;
+#endif
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(kFallbackCount)
+ .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
+ EXPECT_CALL(manager(),
+ MakeAudioOutputStream(
+ AllOf(testing::Property(&AudioParameters::format,
+ AudioParameters::AUDIO_FAKE),
+ testing::Property(&AudioParameters::sample_rate,
+ params_.sample_rate()),
+ testing::Property(&AudioParameters::frames_per_buffer,
+ params_.frames_per_buffer())),
+ _)).WillOnce(Return(&fake_stream));
+ EXPECT_CALL(fake_stream, Open()).WillOnce(Return(true));
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy->Open());
+ CloseAndWaitForCloseTimer(proxy, &fake_stream);
+
+ // Once all proxies have been closed, AudioOutputResampler will start the
+ // reinitialization timer and execute it after the close delay elapses.
+ base::RunLoop run_loop;
+ message_loop_.PostDelayedTask(
+ FROM_HERE, run_loop.QuitClosure(),
+ base::TimeDelta::FromMilliseconds(2 * kTestCloseDelayMs));
+ run_loop.Run();
+
+ // Verify a non-fake stream can be created.
+ MockAudioOutputStream real_stream(&manager_, params_);
+ EXPECT_CALL(manager(),
+ MakeAudioOutputStream(
+ testing::Property(&AudioParameters::format,
+ testing::Ne(AudioParameters::AUDIO_FAKE)),
+ _)).WillOnce(Return(&real_stream));
+
+ // Stream1 should be able to successfully open and start.
+ EXPECT_CALL(real_stream, Open()).WillOnce(Return(true));
+ proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy->Open());
+ CloseAndWaitForCloseTimer(proxy, &real_stream);
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index 7aa32848f32..6fb970b9f2b 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -10,10 +10,8 @@
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
-#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_output_proxy.h"
#include "media/audio/sample_rates.h"
#include "media/base/audio_converter.h"
@@ -153,7 +151,13 @@ AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
: AudioOutputDispatcher(audio_manager, input_params, output_device_id),
close_delay_(close_delay),
output_params_(output_params),
- streams_opened_(false) {
+ original_output_params_(output_params),
+ streams_opened_(false),
+ reinitialize_timer_(FROM_HERE,
+ close_delay_,
+ base::Bind(&AudioOutputResampler::Reinitialize,
+ base::Unretained(this)),
+ false) {
DCHECK(input_params.IsValid());
DCHECK(output_params.IsValid());
DCHECK_EQ(output_params_.format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
@@ -168,6 +172,24 @@ AudioOutputResampler::~AudioOutputResampler() {
DCHECK(callbacks_.empty());
}
+void AudioOutputResampler::Reinitialize() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(streams_opened_);
+
+ // We can only reinitialize the dispatcher if it has no active proxies. Check
+ // if one has been created since the reinitialization timer was started.
+ if (dispatcher_->HasOutputProxies())
+ return;
+
+ // Log a trace event so we can get feedback in the field when this happens.
+ TRACE_EVENT0("audio", "AudioOutputResampler::Reinitialize");
+
+ dispatcher_->Shutdown();
+ output_params_ = original_output_params_;
+ streams_opened_ = false;
+ Initialize();
+}
+
void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
@@ -282,6 +304,14 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
delete it->second;
callbacks_.erase(it);
}
+
+ // Start the reinitialization timer if there are no active proxies and we're
+ // not using the originally requested output parameters. This allows us to
+ // recover from transient output creation errors.
+ if (!dispatcher_->HasOutputProxies() && callbacks_.empty() &&
+ !output_params_.Equals(original_output_params_)) {
+ reinitialize_timer_.Reset();
+ }
}
void AudioOutputResampler::Shutdown() {
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index 4c7be29830e..18d4905b801 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -10,9 +10,10 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
+#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/audio_output_dispatcher.h"
+#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_parameters.h"
namespace media {
@@ -29,12 +30,7 @@ class OnMoreDataConverter;
//
// AOR will automatically fall back from AUDIO_PCM_LOW_LATENCY to
// AUDIO_PCM_LINEAR if the output device fails to open at the requested output
-// parameters.
-//
-// TODO(dalecurtis): Ideally the low latency path will be as reliable as the
-// high latency path once we have channel mixing and support querying for the
-// hardware's configured bit depth. Monitor the UMA stats for fallback and
-// remove fallback support once it's stable. http://crbug.com/148418
+// parameters. If opening still fails, it will fallback to AUDIO_FAKE.
class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
public:
AudioOutputResampler(AudioManager* audio_manager,
@@ -60,11 +56,14 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
// appropriate output parameters in error situations.
void SetupFallbackParams();
- // Used to initialize and reinitialize |dispatcher_|.
+ // Used to reinitialize |dispatcher_|.
+ void Reinitialize();
+
+ // Used to initialize |dispatcher_|.
void Initialize();
// Dispatcher to proxy all AudioOutputDispatcher calls too.
- scoped_refptr<AudioOutputDispatcher> dispatcher_;
+ scoped_refptr<AudioOutputDispatcherImpl> dispatcher_;
// Map of outstanding OnMoreDataConverter objects. A new object is created
// on every StartStream() call and destroyed on CloseStream().
@@ -74,13 +73,22 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
// Used by AudioOutputDispatcherImpl; kept so we can reinitialize on the fly.
base::TimeDelta close_delay_;
- // AudioParameters used to setup the output stream.
+ // AudioParameters used to setup the output stream; changed upon fallback.
AudioParameters output_params_;
+ // The original AudioParameters we were constructed with.
+ const AudioParameters original_output_params_;
+
// Whether any streams have been opened through |dispatcher_|, if so we can't
// fallback on future OpenStream() failures.
bool streams_opened_;
+ // The reinitialization timer provides a way to recover from temporary failure
+ // states by clearing the dispatcher if all proxies have been closed and none
+ // have been created within |close_delay_|. Without this, audio may be lost
+ // to a fake stream indefinitely for transient errors.
+ base::Timer reinitialize_timer_;
+
DISALLOW_COPY_AND_ASSIGN(AudioOutputResampler);
};
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
index 7dd26e1ba53..c9dcdeb7e5f 100644
--- a/chromium/media/audio/audio_parameters.cc
+++ b/chromium/media/audio/audio_parameters.cc
@@ -54,16 +54,11 @@ AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
frames_per_buffer_(frames_per_buffer),
channels_(channels),
effects_(effects) {
- if (channel_layout != CHANNEL_LAYOUT_DISCRETE)
- DCHECK_EQ(channels, ChannelLayoutToChannelCount(channel_layout));
}
void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
int channels, int sample_rate,
int bits_per_sample, int frames_per_buffer) {
- if (channel_layout != CHANNEL_LAYOUT_DISCRETE)
- DCHECK_EQ(channels, ChannelLayoutToChannelCount(channel_layout));
-
format_ = format;
channel_layout_ = channel_layout;
channels_ = channels;
@@ -84,7 +79,9 @@ bool AudioParameters::IsValid() const {
(bits_per_sample_ > 0) &&
(bits_per_sample_ <= media::limits::kMaxBitsPerSample) &&
(frames_per_buffer_ > 0) &&
- (frames_per_buffer_ <= media::limits::kMaxSamplesPerPacket);
+ (frames_per_buffer_ <= media::limits::kMaxSamplesPerPacket) &&
+ (channel_layout_ == CHANNEL_LAYOUT_DISCRETE ||
+ channels_ == ChannelLayoutToChannelCount(channel_layout_));
}
std::string AudioParameters::AsHumanReadableString() const {
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index b5ac71d8865..9267232acf9 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -53,6 +53,7 @@ class MEDIA_EXPORT AudioParameters {
ECHO_CANCELLER = 0x1,
DUCKING = 0x2, // Enables ducking if the OS supports it.
KEYBOARD_MIC = 0x4,
+ HOTWORD = 0x8,
};
AudioParameters();
diff --git a/chromium/media/audio/audio_parameters_unittest.cc b/chromium/media/audio/audio_parameters_unittest.cc
index 390b205a091..92677eb6c6e 100644
--- a/chromium/media/audio/audio_parameters_unittest.cc
+++ b/chromium/media/audio/audio_parameters_unittest.cc
@@ -203,4 +203,25 @@ TEST(AudioParameters, Compare) {
}
}
+TEST(AudioParameters, Constructor_ValidChannelCounts) {
+ int expected_channels = 8;
+ ChannelLayout expected_layout = CHANNEL_LAYOUT_5_1;
+
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ expected_layout, expected_channels, 44100, 16, 880,
+ AudioParameters::NO_EFFECTS);
+
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_layout, params.channel_layout());
+ EXPECT_FALSE(params.IsValid());
+
+ expected_layout = CHANNEL_LAYOUT_DISCRETE;
+ params.Reset(AudioParameters::AUDIO_PCM_LOW_LATENCY, expected_layout,
+ expected_channels, 44100, 16, 880);
+
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_layout, params.channel_layout());
+ EXPECT_TRUE(params.IsValid());
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_power_monitor.cc b/chromium/media/audio/audio_power_monitor.cc
index b0b17971b4f..efbad6c5006 100644
--- a/chromium/media/audio/audio_power_monitor.cc
+++ b/chromium/media/audio/audio_power_monitor.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include <cmath>
-#include "base/float_util.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
@@ -53,7 +52,7 @@ void AudioPowerMonitor::Scan(const AudioBus& buffer, int num_frames) {
const std::pair<float, float> ewma_and_max = vector_math::EWMAAndMaxPower(
average_power_, buffer.channel(i), num_frames, sample_weight_);
// If data in audio buffer is garbage, ignore its effect on the result.
- if (!base::IsFinite(ewma_and_max.first)) {
+ if (!std::isfinite(ewma_and_max.first)) {
sum_power += average_power_;
} else {
sum_power += ewma_and_max.first;
diff --git a/chromium/media/audio/audio_power_monitor_unittest.cc b/chromium/media/audio/audio_power_monitor_unittest.cc
index 1289de0ab47..2b30578d142 100644
--- a/chromium/media/audio/audio_power_monitor_unittest.cc
+++ b/chromium/media/audio/audio_power_monitor_unittest.cc
@@ -91,9 +91,9 @@ class TestScenario {
// should make progress towards the goal value.
class MeasurementObserver {
public:
- MeasurementObserver(float goal_power_measurement, bool goal_clipped)
+ explicit MeasurementObserver(float goal_power_measurement)
: goal_power_measurement_(goal_power_measurement),
- goal_clipped_(goal_clipped), measurement_count_(0),
+ measurement_count_(0),
last_power_measurement_(AudioPowerMonitor::zero_power()),
last_clipped_(false) {}
@@ -140,7 +140,6 @@ class MeasurementObserver {
private:
const float goal_power_measurement_;
- const bool goal_clipped_;
int measurement_count_;
bool measurements_should_increase_;
float last_power_measurement_;
@@ -163,7 +162,7 @@ class AudioPowerMonitorTest : public ::testing::TestWithParam<TestScenario> {
// Feed the AudioPowerMonitor, read measurements from it, and record them in
// MeasurementObserver.
static const int kNumFeedIters = 100;
- MeasurementObserver observer(power, clipped);
+ MeasurementObserver observer(power);
for (int i = 0; i < kNumFeedIters; ++i) {
power_monitor_.Scan(bus, bus.frames());
const std::pair<float, bool>& reading =
diff --git a/chromium/media/audio/audio_unittest_util.cc b/chromium/media/audio/audio_unittest_util.cc
new file mode 100644
index 00000000000..0ddf94ad35f
--- /dev/null
+++ b/chromium/media/audio/audio_unittest_util.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_unittest_util.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+// For macro ABORT_AUDIO_TEST_IF_NOT.
+bool ShouldAbortAudioTest(bool requirements_satisfied,
+ const char* requirements_expression,
+ bool* should_fail) {
+ bool fail_if_unsatisfied = base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kRequireAudioHardwareForTesting);
+ if (!requirements_satisfied) {
+ LOG(WARNING) << "Requirement(s) not satisfied (" << requirements_expression
+ << ")";
+ *should_fail = fail_if_unsatisfied;
+ return true;
+ }
+ *should_fail = false;
+ return false;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_unittest_util.h b/chromium/media/audio/audio_unittest_util.h
new file mode 100644
index 00000000000..3bd0dfad676
--- /dev/null
+++ b/chromium/media/audio/audio_unittest_util.h
@@ -0,0 +1,34 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_
+#define MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Use in tests to either skip or fail a test when the system is missing a
+// required audio device or library. If the --require-audio-hardware-for-testing
+// flag is set, missing requirements will cause the test to fail. Otherwise it
+// will be skipped.
+#define ABORT_AUDIO_TEST_IF_NOT(requirements_satisfied) \
+ do { \
+ bool fail = false; \
+ if (ShouldAbortAudioTest(requirements_satisfied, #requirements_satisfied, \
+ &fail)) { \
+ if (fail) \
+ FAIL(); \
+ else \
+ return; \
+ } \
+ } while (false)
+
+bool ShouldAbortAudioTest(bool requirements_satisfied,
+ const char* requirements_expression,
+ bool* should_fail);
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_UNITTEST_UTIL_H_
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
index 88e25b8af8d..44e9adb4451 100644
--- a/chromium/media/audio/clockless_audio_sink.cc
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -5,8 +5,6 @@
#include "media/audio/clockless_audio_sink.h"
#include "base/threading/simple_thread.h"
-#include "base/time/time.h"
-#include "media/base/audio_renderer_sink.h"
namespace media {
@@ -44,10 +42,10 @@ class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
base::PlatformThread::YieldCurrentThread();
} else if (start.is_null()) {
// First time we processed some audio, so record the starting time.
- start = base::TimeTicks::HighResNow();
+ start = base::TimeTicks::Now();
} else {
// Keep track of the last time data was rendered.
- playback_time_ = base::TimeTicks::HighResNow() - start;
+ playback_time_ = base::TimeTicks::Now() - start;
}
}
}
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index 0bceb456ea5..4c8f992ee05 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -20,34 +20,34 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
AudioManagerCras(AudioLogFactory* audio_log_factory);
// AudioManager implementation.
- virtual bool HasAudioOutputDevices() override;
- virtual bool HasAudioInputDevices() override;
- virtual void ShowAudioInputSettings() override;
- virtual void GetAudioInputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual void GetAudioOutputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual AudioParameters GetInputStreamParameters(
+ bool HasAudioOutputDevices() override;
+ bool HasAudioInputDevices() override;
+ void ShowAudioInputSettings() override;
+ void GetAudioInputDeviceNames(AudioDeviceNames* device_names) override;
+ void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) override;
+ AudioParameters GetInputStreamParameters(
const std::string& device_id) override;
- virtual void SetHasKeyboardMic() override;
+ void SetHasKeyboardMic() override;
// AudioManagerBase implementation.
- virtual AudioOutputStream* MakeLinearOutputStream(
+ AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) override;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& device_id) override;
+ AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) override;
+ AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) override;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) override;
static snd_pcm_format_t BitsToFormat(int bits_per_sample);
protected:
- virtual ~AudioManagerCras();
+ ~AudioManagerCras() override;
- virtual AudioParameters GetPreferredOutputStreamParameters(
+ AudioParameters GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) override;
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index 3fae8df1e77..31bba72b4a7 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -117,8 +117,8 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
CRAS_CH_SL,
CRAS_CH_SR
};
- COMPILE_ASSERT(arraysize(kChannelMap) == CHANNELS_MAX + 1,
- channel_map_size_do_not_match);
+ static_assert(arraysize(kChannelMap) == CHANNELS_MAX + 1,
+ "kChannelMap array size should match");
// If already playing, stop before re-starting.
if (started_)
@@ -159,6 +159,10 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
return;
}
+ uint32_t flags = 0;
+ if (params_.effects() & AudioParameters::PlatformEffectsMask::HOTWORD)
+ flags = HOTWORD_STREAM;
+
unsigned int frames_per_packet = params_.frames_per_buffer();
cras_stream_params* stream_params = cras_client_stream_params_create(
stream_direction_,
@@ -166,7 +170,7 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
frames_per_packet, // Call back when this many ready.
frames_per_packet, // Minimum Callback level ignored for capture streams.
CRAS_STREAM_TYPE_DEFAULT,
- 0, // Unused flags.
+ flags,
this,
CrasInputStream::SamplesReady,
CrasInputStream::StreamError,
diff --git a/chromium/media/audio/cras/cras_input.h b/chromium/media/audio/cras/cras_input.h
index f290f1e0986..be50ac741fa 100644
--- a/chromium/media/audio/cras/cras_input.h
+++ b/chromium/media/audio/cras/cras_input.h
@@ -25,22 +25,23 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
public:
// The ctor takes all the usual parameters, plus |manager| which is the
// audio manager who is creating this object.
- CrasInputStream(const AudioParameters& params, AudioManagerCras* manager,
+ CrasInputStream(const AudioParameters& params,
+ AudioManagerCras* manager,
const std::string& device_id);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioOutputStream::Close().
- virtual ~CrasInputStream();
+ ~CrasInputStream() override;
// Implementation of AudioInputStream.
- virtual bool Open() override;
- virtual void Start(AudioInputCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
- virtual double GetMaxVolume() override;
- virtual void SetVolume(double volume) override;
- virtual double GetVolume() override;
- virtual bool IsMuted() override;
+ bool Open() override;
+ void Start(AudioInputCallback* callback) override;
+ void Stop() override;
+ void Close() override;
+ double GetMaxVolume() override;
+ void SetVolume(double volume) override;
+ double GetVolume() override;
+ bool IsMuted() override;
private:
// Handles requests to get samples from the provided buffer. This will be
diff --git a/chromium/media/audio/cras/cras_input_unittest.cc b/chromium/media/audio/cras/cras_input_unittest.cc
index 4113f582451..c3e3ca81d17 100644
--- a/chromium/media/audio/cras/cras_input_unittest.cc
+++ b/chromium/media/audio/cras/cras_input_unittest.cc
@@ -41,7 +41,7 @@ class MockAudioManagerCrasInput : public AudioManagerCras {
// of active output streams. It is because the number of active streams
// is managed inside MakeAudioInputStream, and we don't use
// MakeAudioInputStream to create the stream in the tests.
- virtual void ReleaseInputStream(AudioInputStream* stream) override {
+ void ReleaseInputStream(AudioInputStream* stream) override {
DCHECK(stream);
delete stream;
}
diff --git a/chromium/media/audio/cras/cras_unified.h b/chromium/media/audio/cras/cras_unified.h
index 464fee632de..2326648aa0b 100644
--- a/chromium/media/audio/cras/cras_unified.h
+++ b/chromium/media/audio/cras/cras_unified.h
@@ -33,15 +33,15 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioUnifiedStream::Close().
- virtual ~CrasUnifiedStream();
+ ~CrasUnifiedStream() override;
// Implementation of AudioOutputStream.
- virtual bool Open() override;
- virtual void Close() override;
- virtual void Start(AudioSourceCallback* callback) override;
- virtual void Stop() override;
- virtual void SetVolume(double volume) override;
- virtual void GetVolume(double* volume) override;
+ bool Open() override;
+ void Close() override;
+ void Start(AudioSourceCallback* callback) override;
+ void Stop() override;
+ void SetVolume(double volume) override;
+ void GetVolume(double* volume) override;
private:
// Convert Latency in time to bytes.
diff --git a/chromium/media/audio/cras/cras_unified_unittest.cc b/chromium/media/audio/cras/cras_unified_unittest.cc
index 932ccb4f2ce..2f29bfa5664 100644
--- a/chromium/media/audio/cras/cras_unified_unittest.cc
+++ b/chromium/media/audio/cras/cras_unified_unittest.cc
@@ -49,7 +49,7 @@ class MockAudioManagerCras : public AudioManagerCras {
// of active output streams. It is because the number of active streams
// is managed inside MakeAudioOutputStream, and we don't use
// MakeAudioOutputStream to create the stream in the tests.
- virtual void ReleaseOutputStream(AudioOutputStream* stream) override {
+ void ReleaseOutputStream(AudioOutputStream* stream) override {
DCHECK(stream);
delete stream;
}
@@ -104,11 +104,16 @@ const AudioParameters::Format CrasUnifiedStreamTest::kTestFormat =
const uint32 CrasUnifiedStreamTest::kTestFramesPerPacket = 1000;
TEST_F(CrasUnifiedStreamTest, ConstructedState) {
+ CrasUnifiedStream* test_stream = CreateStream(kTestChannelLayout);
+ EXPECT_TRUE(test_stream->Open());
+ test_stream->Close();
+
// Should support mono.
- CrasUnifiedStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+ test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+ EXPECT_TRUE(test_stream->Open());
test_stream->Close();
- // Should support stereo.
+ // Should support multi-channel.
test_stream = CreateStream(CHANNEL_LAYOUT_SURROUND);
EXPECT_TRUE(test_stream->Open());
test_stream->Close();
@@ -127,11 +132,6 @@ TEST_F(CrasUnifiedStreamTest, ConstructedState) {
test_stream = new CrasUnifiedStream(bad_rate_params, mock_manager_.get());
EXPECT_FALSE(test_stream->Open());
test_stream->Close();
-
- // Check that Mono works too.
- test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- ASSERT_TRUE(test_stream->Open());
- test_stream->Close();
}
TEST_F(CrasUnifiedStreamTest, RenderFrames) {
diff --git a/chromium/media/audio/fake_audio_input_stream.cc b/chromium/media/audio/fake_audio_input_stream.cc
index 157d3978c76..458118da01c 100644
--- a/chromium/media/audio/fake_audio_input_stream.cc
+++ b/chromium/media/audio/fake_audio_input_stream.cc
@@ -5,59 +5,18 @@
#include "media/audio/fake_audio_input_stream.h"
#include "base/bind.h"
-#include "base/lazy_instance.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/simple_sources.h"
#include "media/base/audio_bus.h"
-
-using base::TimeTicks;
-using base::TimeDelta;
+#include "media/base/media_switches.h"
namespace media {
-namespace {
-
-// These values are based on experiments for local-to-local
-// PeerConnection to demonstrate audio/video synchronization.
-const int kBeepDurationMilliseconds = 20;
-const int kBeepFrequency = 400;
-
-// Intervals between two automatic beeps.
-const int kAutomaticBeepIntervalInMs = 500;
-
-// Automatic beep will be triggered every |kAutomaticBeepIntervalInMs| unless
-// users explicitly call BeepOnce(), which will disable the automatic beep.
-class BeepContext {
- public:
- BeepContext() : beep_once_(false), automatic_beep_(true) {}
-
- void SetBeepOnce(bool enable) {
- base::AutoLock auto_lock(lock_);
- beep_once_ = enable;
-
- // Disable the automatic beep if users explicit set |beep_once_| to true.
- if (enable)
- automatic_beep_ = false;
- }
- bool beep_once() const {
- base::AutoLock auto_lock(lock_);
- return beep_once_;
- }
- bool automatic_beep() const {
- base::AutoLock auto_lock(lock_);
- return automatic_beep_;
- }
-
- private:
- mutable base::Lock lock_;
- bool beep_once_;
- bool automatic_beep_;
-};
-
-static base::LazyInstance<BeepContext> g_beep_context =
- LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
AudioInputStream* FakeAudioInputStream::MakeFakeStream(
AudioManagerBase* manager,
const AudioParameters& params) {
@@ -68,124 +27,39 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- buffer_size_((params.channels() * params.bits_per_sample() *
- params.frames_per_buffer()) /
- 8),
+ fake_audio_worker_(manager->GetWorkerTaskRunner(), params),
params_(params),
- task_runner_(manager->GetTaskRunner()),
- callback_interval_(base::TimeDelta::FromMilliseconds(
- (params.frames_per_buffer() * 1000) / params.sample_rate())),
- beep_duration_in_buffers_(kBeepDurationMilliseconds *
- params.sample_rate() /
- params.frames_per_buffer() /
- 1000),
- beep_generated_in_buffers_(0),
- beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
- frames_elapsed_(0),
- audio_bus_(AudioBus::Create(params)),
- weak_factory_(this) {
+ audio_bus_(AudioBus::Create(params)) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
}
-FakeAudioInputStream::~FakeAudioInputStream() {}
+FakeAudioInputStream::~FakeAudioInputStream() {
+ DCHECK(!callback_);
+}
bool FakeAudioInputStream::Open() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
- buffer_.reset(new uint8[buffer_size_]);
- memset(buffer_.get(), 0, buffer_size_);
audio_bus_->Zero();
+
return true;
}
void FakeAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
- DCHECK(!callback_);
callback_ = callback;
- last_callback_time_ = TimeTicks::Now();
- task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, weak_factory_.GetWeakPtr()),
- callback_interval_);
-}
-
-void FakeAudioInputStream::DoCallback() {
- DCHECK(callback_);
-
- const TimeTicks now = TimeTicks::Now();
- base::TimeDelta next_callback_time =
- last_callback_time_ + callback_interval_ * 2 - now;
-
- // If we are falling behind, try to catch up as much as we can in the next
- // callback.
- if (next_callback_time < base::TimeDelta())
- next_callback_time = base::TimeDelta();
-
- // Accumulate the time from the last beep.
- interval_from_last_beep_ += now - last_callback_time_;
-
- last_callback_time_ = now;
-
- memset(buffer_.get(), 0, buffer_size_);
-
- bool should_beep = false;
- {
- BeepContext* beep_context = g_beep_context.Pointer();
- if (beep_context->automatic_beep()) {
- base::TimeDelta delta = interval_from_last_beep_ -
- TimeDelta::FromMilliseconds(kAutomaticBeepIntervalInMs);
- if (delta > base::TimeDelta()) {
- should_beep = true;
- interval_from_last_beep_ = delta;
- }
- } else {
- should_beep = beep_context->beep_once();
- beep_context->SetBeepOnce(false);
- }
- }
-
- // If this object was instructed to generate a beep or has started to
- // generate a beep sound.
- if (should_beep || beep_generated_in_buffers_) {
- // Compute the number of frames to output high value. Then compute the
- // number of bytes based on channels and bits per channel.
- int high_frames = beep_period_in_frames_ / 2;
- int high_bytes = high_frames * params_.bits_per_sample() *
- params_.channels() / 8;
-
- // Separate high and low with the same number of bytes to generate a
- // square wave.
- int position = 0;
- while (position + high_bytes <= buffer_size_) {
- // Write high values first.
- memset(buffer_.get() + position, 128, high_bytes);
- // Then leave low values in the buffer with |high_bytes|.
- position += high_bytes * 2;
- }
-
- ++beep_generated_in_buffers_;
- if (beep_generated_in_buffers_ >= beep_duration_in_buffers_)
- beep_generated_in_buffers_ = 0;
- }
-
- audio_bus_->FromInterleaved(
- buffer_.get(), audio_bus_->frames(), params_.bits_per_sample() / 8);
- callback_->OnData(this, audio_bus_.get(), buffer_size_, 1.0);
- frames_elapsed_ += params_.frames_per_buffer();
-
- task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, weak_factory_.GetWeakPtr()),
- next_callback_time);
+ fake_audio_worker_.Start(base::Bind(
+ &FakeAudioInputStream::ReadAudioFromSource, base::Unretained(this)));
}
void FakeAudioInputStream::Stop() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
- weak_factory_.InvalidateWeakPtrs();
+ fake_audio_worker_.Stop();
callback_ = NULL;
}
void FakeAudioInputStream::Close() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(!callback_);
audio_manager_->ReleaseInputStream(this);
}
@@ -208,16 +82,46 @@ bool FakeAudioInputStream::IsMuted() {
return false;
}
-void FakeAudioInputStream::SetAutomaticGainControl(bool enabled) {}
+bool FakeAudioInputStream::SetAutomaticGainControl(bool enabled) {
+ return false;
+}
bool FakeAudioInputStream::GetAutomaticGainControl() {
- return true;
+ return false;
+}
+
+void FakeAudioInputStream::ReadAudioFromSource() {
+ DCHECK(audio_manager_->GetWorkerTaskRunner()->BelongsToCurrentThread());
+ DCHECK(callback_);
+
+ if (!audio_source_)
+ audio_source_ = ChooseSource();
+
+ const int kNoDelay = 0;
+ audio_source_->OnMoreData(audio_bus_.get(), kNoDelay);
+ callback_->OnData(this, audio_bus_.get(), 0, 1.0);
+}
+
+using AudioSourceCallback = AudioOutputStream::AudioSourceCallback;
+scoped_ptr<AudioSourceCallback> FakeAudioInputStream::ChooseSource() {
+ DCHECK(audio_manager_->GetWorkerTaskRunner()->BelongsToCurrentThread());
+
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kUseFileForFakeAudioCapture)) {
+ base::FilePath path_to_wav_file =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ switches::kUseFileForFakeAudioCapture);
+ CHECK(!path_to_wav_file.empty())
+ << "You must pass the file to use as argument to --"
+ << switches::kUseFileForFakeAudioCapture << ".";
+
+ return make_scoped_ptr(new FileSource(params_, path_to_wav_file));
+ }
+ return make_scoped_ptr(new BeepingSource(params_));
}
-// static
void FakeAudioInputStream::BeepOnce() {
- BeepContext* beep_context = g_beep_context.Pointer();
- beep_context->SetBeepOnce(true);
+ BeepingSource::BeepOnce();
}
} // namespace media
diff --git a/chromium/media/audio/fake_audio_input_stream.h b/chromium/media/audio/fake_audio_input_stream.h
index 5f9f804abab..4b8a98bceaf 100644
--- a/chromium/media/audio/fake_audio_input_stream.h
+++ b/chromium/media/audio/fake_audio_input_stream.h
@@ -9,23 +9,28 @@
#include <vector>
+#include "base/callback_forward.h"
#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
-#include "base/time/time.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
+#include "media/audio/fake_audio_worker.h"
+
namespace media {
class AudioBus;
class AudioManagerBase;
+class SimpleSource;
+// This class acts as a fake audio input stream. The default is to generate a
+// beeping sound unless --use-file-for-fake-audio-capture=<file> is specified,
+// in which case the indicated .wav file will be read and played into the
+// stream.
class MEDIA_EXPORT FakeAudioInputStream
: public AudioInputStream {
public:
- static AudioInputStream* MakeFakeStream(AudioManagerBase* manager,
- const AudioParameters& params);
+ static AudioInputStream* MakeFakeStream(
+ AudioManagerBase* manager, const AudioParameters& params);
bool Open() override;
void Start(AudioInputCallback* callback) override;
@@ -35,15 +40,16 @@ class MEDIA_EXPORT FakeAudioInputStream
void SetVolume(double volume) override;
double GetVolume() override;
bool IsMuted() override;
- void SetAutomaticGainControl(bool enabled) override;
+ bool SetAutomaticGainControl(bool enabled) override;
bool GetAutomaticGainControl() override;
- // Generate one beep sound. This method is called by
- // FakeVideoCaptureDevice to test audio/video synchronization.
- // This is a static method because FakeVideoCaptureDevice is
- // disconnected from an audio device. This means only one instance of
- // this class gets to respond, which is okay because we assume there's
- // only one stream for this testing purpose.
+ // Generate one beep sound. This method is called by FakeVideoCaptureDevice to
+ // test audio/video synchronization. This is a static method because
+ // FakeVideoCaptureDevice is disconnected from an audio device. This means
+ // only one instance of this class gets to respond, which is okay because we
+ // assume there's only one stream for this testing purpose. Furthermore this
+ // method will do nothing if --use-file-for-fake-audio-capture is specified
+ // since the input stream will be playing from a file instead of beeping.
// TODO(hclam): Make this non-static. To do this we'll need to fix
// crbug.com/159053 such that video capture device is aware of audio
// input stream.
@@ -52,29 +58,18 @@ class MEDIA_EXPORT FakeAudioInputStream
private:
FakeAudioInputStream(AudioManagerBase* manager,
const AudioParameters& params);
-
~FakeAudioInputStream() override;
- void DoCallback();
+ scoped_ptr<AudioOutputStream::AudioSourceCallback> ChooseSource();
+ void ReadAudioFromSource();
AudioManagerBase* audio_manager_;
AudioInputCallback* callback_;
- scoped_ptr<uint8[]> buffer_;
- int buffer_size_;
+ FakeAudioWorker fake_audio_worker_;
AudioParameters params_;
- const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- base::TimeTicks last_callback_time_;
- base::TimeDelta callback_interval_;
- base::TimeDelta interval_from_last_beep_;
- int beep_duration_in_buffers_;
- int beep_generated_in_buffers_;
- int beep_period_in_frames_;
- int frames_elapsed_;
- scoped_ptr<media::AudioBus> audio_bus_;
- // Allows us to run tasks on the FakeAudioInputStream instance which are
- // bound by its lifetime.
- base::WeakPtrFactory<FakeAudioInputStream> weak_factory_;
+ scoped_ptr<AudioOutputStream::AudioSourceCallback> audio_source_;
+ scoped_ptr<media::AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
};
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
index d5e0d5f551f..b919b84ccf9 100644
--- a/chromium/media/audio/fake_audio_output_stream.cc
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -22,7 +22,8 @@ FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- fake_consumer_(manager->GetWorkerTaskRunner(), params) {
+ fake_worker_(manager->GetWorkerTaskRunner(), params),
+ audio_bus_(AudioBus::Create(params)) {
}
FakeAudioOutputStream::~FakeAudioOutputStream() {
@@ -31,19 +32,20 @@ FakeAudioOutputStream::~FakeAudioOutputStream() {
bool FakeAudioOutputStream::Open() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
+ audio_bus_->Zero();
return true;
}
void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
callback_ = callback;
- fake_consumer_.Start(base::Bind(
+ fake_worker_.Start(base::Bind(
&FakeAudioOutputStream::CallOnMoreData, base::Unretained(this)));
}
void FakeAudioOutputStream::Stop() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
- fake_consumer_.Stop();
+ fake_worker_.Stop();
callback_ = NULL;
}
@@ -59,9 +61,9 @@ void FakeAudioOutputStream::GetVolume(double* volume) {
*volume = 0;
};
-void FakeAudioOutputStream::CallOnMoreData(AudioBus* audio_bus) {
+void FakeAudioOutputStream::CallOnMoreData() {
DCHECK(audio_manager_->GetWorkerTaskRunner()->BelongsToCurrentThread());
- callback_->OnMoreData(audio_bus, 0);
+ callback_->OnMoreData(audio_bus_.get(), 0);
}
} // namespace media
diff --git a/chromium/media/audio/fake_audio_output_stream.h b/chromium/media/audio/fake_audio_output_stream.h
index e5e62919d4b..712a6b92b05 100644
--- a/chromium/media/audio/fake_audio_output_stream.h
+++ b/chromium/media/audio/fake_audio_output_stream.h
@@ -8,7 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/fake_audio_worker.h"
namespace media {
@@ -36,11 +36,12 @@ class MEDIA_EXPORT FakeAudioOutputStream : public AudioOutputStream {
~FakeAudioOutputStream() override;
// Task that periodically calls OnMoreData() to consume audio data.
- void CallOnMoreData(AudioBus* audio_bus);
+ void CallOnMoreData();
AudioManagerBase* audio_manager_;
AudioSourceCallback* callback_;
- FakeAudioConsumer fake_consumer_;
+ FakeAudioWorker fake_worker_;
+ scoped_ptr<AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioOutputStream);
};
diff --git a/chromium/media/audio/fake_audio_consumer.cc b/chromium/media/audio/fake_audio_worker.cc
index ca99424f419..44177d385ec 100644
--- a/chromium/media/audio/fake_audio_consumer.cc
+++ b/chromium/media/audio/fake_audio_worker.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/fake_audio_worker.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -15,18 +15,17 @@
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
namespace media {
-class FakeAudioConsumer::Worker
- : public base::RefCountedThreadSafe<FakeAudioConsumer::Worker> {
+class FakeAudioWorker::Worker
+ : public base::RefCountedThreadSafe<FakeAudioWorker::Worker> {
public:
Worker(const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params);
bool IsStopped();
- void Start(const ReadCB& read_cb);
+ void Start(const base::Closure& worker_cb);
void Stop();
private:
@@ -39,114 +38,110 @@ class FakeAudioConsumer::Worker
// Cancel any delayed callbacks to DoRead() in the worker loop's queue.
void DoCancel();
- // Task that regularly calls |read_cb_| according to the playback rate as
+ // Task that regularly calls |worker_cb_| according to the playback rate as
// determined by the audio parameters given during construction. Runs on
// the worker loop.
void DoRead();
const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
- const scoped_ptr<AudioBus> audio_bus_;
const base::TimeDelta buffer_duration_;
- base::Lock read_cb_lock_; // Held while mutating or running |read_cb_|.
- ReadCB read_cb_;
+ base::Lock worker_cb_lock_; // Held while mutating or running |worker_cb_|.
+ base::Closure worker_cb_;
base::TimeTicks next_read_time_;
// Used to cancel any delayed tasks still inside the worker loop's queue.
- base::CancelableClosure read_task_cb_;
+ base::CancelableClosure worker_task_cb_;
base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(Worker);
};
-FakeAudioConsumer::FakeAudioConsumer(
+FakeAudioWorker::FakeAudioWorker(
const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
: worker_(new Worker(worker_task_runner, params)) {
}
-FakeAudioConsumer::~FakeAudioConsumer() {
+FakeAudioWorker::~FakeAudioWorker() {
DCHECK(worker_->IsStopped());
}
-void FakeAudioConsumer::Start(const ReadCB& read_cb) {
+void FakeAudioWorker::Start(const base::Closure& worker_cb) {
DCHECK(worker_->IsStopped());
- worker_->Start(read_cb);
+ worker_->Start(worker_cb);
}
-void FakeAudioConsumer::Stop() {
+void FakeAudioWorker::Stop() {
worker_->Stop();
}
-FakeAudioConsumer::Worker::Worker(
+FakeAudioWorker::Worker::Worker(
const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
: worker_task_runner_(worker_task_runner),
- audio_bus_(AudioBus::Create(params)),
buffer_duration_(base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
static_cast<float>(params.sample_rate()))) {
- audio_bus_->Zero();
-
// Worker can be constructed on any thread, but will DCHECK that its
// Start/Stop methods are called from the same thread.
thread_checker_.DetachFromThread();
}
-FakeAudioConsumer::Worker::~Worker() {
- DCHECK(read_cb_.is_null());
+FakeAudioWorker::Worker::~Worker() {
+ DCHECK(worker_cb_.is_null());
}
-bool FakeAudioConsumer::Worker::IsStopped() {
- base::AutoLock scoped_lock(read_cb_lock_);
- return read_cb_.is_null();
+bool FakeAudioWorker::Worker::IsStopped() {
+ base::AutoLock scoped_lock(worker_cb_lock_);
+ return worker_cb_.is_null();
}
-void FakeAudioConsumer::Worker::Start(const ReadCB& read_cb) {
+void FakeAudioWorker::Worker::Start(const base::Closure& worker_cb) {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!read_cb.is_null());
+ DCHECK(!worker_cb.is_null());
{
- base::AutoLock scoped_lock(read_cb_lock_);
- DCHECK(read_cb_.is_null());
- read_cb_ = read_cb;
+ base::AutoLock scoped_lock(worker_cb_lock_);
+ DCHECK(worker_cb_.is_null());
+ worker_cb_ = worker_cb;
}
worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
}
-void FakeAudioConsumer::Worker::DoStart() {
+void FakeAudioWorker::Worker::DoStart() {
DCHECK(worker_task_runner_->BelongsToCurrentThread());
next_read_time_ = base::TimeTicks::Now();
- read_task_cb_.Reset(base::Bind(&Worker::DoRead, this));
- read_task_cb_.callback().Run();
+ worker_task_cb_.Reset(base::Bind(&Worker::DoRead, this));
+ worker_task_cb_.callback().Run();
}
-void FakeAudioConsumer::Worker::Stop() {
+void FakeAudioWorker::Worker::Stop() {
DCHECK(thread_checker_.CalledOnValidThread());
{
- base::AutoLock scoped_lock(read_cb_lock_);
- if (read_cb_.is_null())
+ base::AutoLock scoped_lock(worker_cb_lock_);
+ if (worker_cb_.is_null())
return;
- read_cb_.Reset();
+ worker_cb_.Reset();
}
worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
}
-void FakeAudioConsumer::Worker::DoCancel() {
+void FakeAudioWorker::Worker::DoCancel() {
DCHECK(worker_task_runner_->BelongsToCurrentThread());
- read_task_cb_.Cancel();
+ worker_task_cb_.Cancel();
}
-void FakeAudioConsumer::Worker::DoRead() {
+void FakeAudioWorker::Worker::DoRead() {
DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
- base::AutoLock scoped_lock(read_cb_lock_);
- if (!read_cb_.is_null())
- read_cb_.Run(audio_bus_.get());
+ base::AutoLock scoped_lock(worker_cb_lock_);
+ if (!worker_cb_.is_null())
+ worker_cb_.Run();
}
- // Need to account for time spent here due to the cost of |read_cb_| as well
+ // Need to account for time spent here due to the cost of |worker_cb_| as well
// as the imprecision of PostDelayedTask().
const base::TimeTicks now = base::TimeTicks::Now();
base::TimeDelta delay = next_read_time_ + buffer_duration_ - now;
@@ -157,7 +152,7 @@ void FakeAudioConsumer::Worker::DoRead() {
next_read_time_ = now + delay;
worker_task_runner_->PostDelayedTask(
- FROM_HERE, read_task_cb_.callback(), delay);
+ FROM_HERE, worker_task_cb_.callback(), delay);
}
} // namespace media
diff --git a/chromium/media/audio/fake_audio_consumer.h b/chromium/media/audio/fake_audio_worker.h
index 18c552ad97b..d3511a3a48c 100644
--- a/chromium/media/audio/fake_audio_consumer.h
+++ b/chromium/media/audio/fake_audio_worker.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
-#define MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_WORKER_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_WORKER_H_
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
@@ -17,27 +17,26 @@ namespace media {
class AudioBus;
class AudioParameters;
-// A fake audio consumer. Using a provided message loop, FakeAudioConsumer will
-// simulate a real time consumer of audio data.
-class MEDIA_EXPORT FakeAudioConsumer {
+// A fake audio worker. Using a provided message loop, FakeAudioWorker will
+// call back the provided callback like a real audio consumer or producer would.
+class MEDIA_EXPORT FakeAudioWorker {
public:
- // |worker_task_runner| is the task runner on which the ReadCB provided to
+ // |worker_task_runner| is the task runner on which the closure provided to
// Start() will be executed on. This may or may not be the be for the same
// thread that invokes the Start/Stop methods.
// |params| is used to determine the frequency of callbacks.
- FakeAudioConsumer(
+ FakeAudioWorker(
const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params);
- ~FakeAudioConsumer();
+ ~FakeAudioWorker();
- // Start executing |read_cb| at a regular intervals. Stop() must be called by
- // the same thread before destroying FakeAudioConsumer.
- typedef base::Callback<void(AudioBus* audio_bus)> ReadCB;
- void Start(const ReadCB& read_cb);
+ // Start executing |worker_cb| at regular intervals. Stop() must be called
+ // by the same thread before destroying FakeAudioWorker.
+ void Start(const base::Closure& worker_cb);
- // Stop executing the ReadCB provided to Start(). Blocks until the worker
- // loop is not inside a ReadCB invocation. Safe to call multiple times. Must
- // be called on the same thread that called Start().
+ // Stop executing the closure provided to Start(). Blocks until the worker
+ // loop is not inside a closure invocation. Safe to call multiple times.
+ // Must be called on the same thread that called Start().
void Stop();
private:
@@ -47,9 +46,9 @@ class MEDIA_EXPORT FakeAudioConsumer {
class Worker;
const scoped_refptr<Worker> worker_;
- DISALLOW_COPY_AND_ASSIGN(FakeAudioConsumer);
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioWorker);
};
} // namespace media
-#endif // MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
+#endif // MEDIA_AUDIO_FAKE_AUDIO_WORKER_H_
diff --git a/chromium/media/audio/fake_audio_consumer_unittest.cc b/chromium/media/audio/fake_audio_worker_unittest.cc
index ab97eaea8c7..49a69d2b086 100644
--- a/chromium/media/audio/fake_audio_consumer_unittest.cc
+++ b/chromium/media/audio/fake_audio_worker_unittest.cc
@@ -6,7 +6,7 @@
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/fake_audio_worker.h"
#include "media/audio/simple_sources.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -14,57 +14,57 @@ namespace media {
static const int kTestCallbacks = 5;
-class FakeAudioConsumerTest : public testing::Test {
+class FakeAudioWorkerTest : public testing::Test {
public:
- FakeAudioConsumerTest()
+ FakeAudioWorkerTest()
: params_(
AudioParameters::AUDIO_FAKE, CHANNEL_LAYOUT_STEREO, 44100, 8, 128),
- fake_consumer_(message_loop_.message_loop_proxy(), params_),
- source_(params_.channels(), 200.0, params_.sample_rate()) {
+ fake_worker_(message_loop_.message_loop_proxy(), params_),
+ seen_callbacks_(0) {
time_between_callbacks_ = base::TimeDelta::FromMicroseconds(
params_.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
static_cast<float>(params_.sample_rate()));
}
- ~FakeAudioConsumerTest() override {}
+ ~FakeAudioWorkerTest() override {}
- void ConsumeData(AudioBus* audio_bus) {
- source_.OnMoreData(audio_bus, 0);
+ void CalledByFakeWorker() {
+ seen_callbacks_++;
}
void RunOnAudioThread() {
ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
- fake_consumer_.Start(base::Bind(
- &FakeAudioConsumerTest::ConsumeData, base::Unretained(this)));
+ fake_worker_.Start(base::Bind(
+ &FakeAudioWorkerTest::CalledByFakeWorker, base::Unretained(this)));
}
void RunOnceOnAudioThread() {
ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
RunOnAudioThread();
- // Start() should immediately post a task to run the source callback, so we
+ // Start() should immediately post a task to run the callback, so we
// should end up with only a single callback being run.
message_loop_.PostTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::EndTest, base::Unretained(this), 1));
+ &FakeAudioWorkerTest::EndTest, base::Unretained(this), 1));
}
void StopStartOnAudioThread() {
ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
- fake_consumer_.Stop();
+ fake_worker_.Stop();
RunOnAudioThread();
}
void TimeCallbacksOnAudioThread(int callbacks) {
ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
- if (source_.callbacks() == 0) {
+ if (seen_callbacks_ == 0) {
RunOnAudioThread();
start_time_ = base::TimeTicks::Now();
}
// Keep going until we've seen the requested number of callbacks.
- if (source_.callbacks() < callbacks) {
+ if (seen_callbacks_ < callbacks) {
message_loop_.PostDelayedTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ &FakeAudioWorkerTest::TimeCallbacksOnAudioThread,
base::Unretained(this), callbacks), time_between_callbacks_ / 2);
} else {
end_time_ = base::TimeTicks::Now();
@@ -74,46 +74,45 @@ class FakeAudioConsumerTest : public testing::Test {
void EndTest(int callbacks) {
ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
- fake_consumer_.Stop();
- EXPECT_LE(callbacks, source_.callbacks());
+ fake_worker_.Stop();
+ EXPECT_LE(callbacks, seen_callbacks_);
message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
}
protected:
base::MessageLoop message_loop_;
AudioParameters params_;
- FakeAudioConsumer fake_consumer_;
- SineWaveAudioSource source_;
+ FakeAudioWorker fake_worker_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
base::TimeDelta time_between_callbacks_;
+ int seen_callbacks_;
private:
- DISALLOW_COPY_AND_ASSIGN(FakeAudioConsumerTest);
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioWorkerTest);
};
-// Ensure the fake audio stream runs on the audio thread and handles fires
-// callbacks to the AudioSourceCallback.
-TEST_F(FakeAudioConsumerTest, FakeStreamBasicCallback) {
+// Ensure the worker runs on the audio thread and fires callbacks.
+TEST_F(FakeAudioWorkerTest, FakeBasicCallback) {
message_loop_.PostTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::RunOnceOnAudioThread,
+ &FakeAudioWorkerTest::RunOnceOnAudioThread,
base::Unretained(this)));
message_loop_.Run();
}
// Ensure the time between callbacks is sane.
-TEST_F(FakeAudioConsumerTest, TimeBetweenCallbacks) {
+TEST_F(FakeAudioWorkerTest, TimeBetweenCallbacks) {
message_loop_.PostTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ &FakeAudioWorkerTest::TimeCallbacksOnAudioThread,
base::Unretained(this), kTestCallbacks));
message_loop_.Run();
// There are only (kTestCallbacks - 1) intervals between kTestCallbacks.
base::TimeDelta actual_time_between_callbacks =
- (end_time_ - start_time_) / (source_.callbacks() - 1);
+ (end_time_ - start_time_) / (seen_callbacks_ - 1);
// Ensure callback time is no faster than the expected time between callbacks.
- EXPECT_TRUE(actual_time_between_callbacks >= time_between_callbacks_);
+ EXPECT_GE(actual_time_between_callbacks, time_between_callbacks_);
// Softly check if the callback time is no slower than twice the expected time
// between callbacks. Since this test runs on the bots we can't be too strict
@@ -122,17 +121,17 @@ TEST_F(FakeAudioConsumerTest, TimeBetweenCallbacks) {
LOG(ERROR) << "Time between fake audio callbacks is too large!";
}
-// Ensure Start()/Stop() on the stream doesn't generate too many callbacks. See
-// http://crbug.com/159049
-TEST_F(FakeAudioConsumerTest, StartStopClearsCallbacks) {
+// Ensure Start()/Stop() on the worker doesn't generate too many callbacks. See
+// http://crbug.com/159049.
+TEST_F(FakeAudioWorkerTest, StartStopClearsCallbacks) {
message_loop_.PostTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ &FakeAudioWorkerTest::TimeCallbacksOnAudioThread,
base::Unretained(this), kTestCallbacks));
// Issue a Stop() / Start() in between expected callbacks to maximize the
- // chance of catching the FakeAudioOutputStream doing the wrong thing.
+ // chance of catching the worker doing the wrong thing.
message_loop_.PostDelayedTask(FROM_HERE, base::Bind(
- &FakeAudioConsumerTest::StopStartOnAudioThread,
+ &FakeAudioWorkerTest::StopStartOnAudioThread,
base::Unretained(this)), time_between_callbacks_ / 2);
// EndTest() will ensure the proper number of callbacks have occurred.
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
index e7824b4d6c8..0e7c9810a5c 100644
--- a/chromium/media/audio/linux/audio_manager_linux.cc
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -28,7 +28,7 @@ enum LinuxAudioIO {
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_CRAS)
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax + 1);
return new AudioManagerCras(audio_log_factory);
}
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index a199eb93f01..9f665fc7c7c 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -9,10 +9,10 @@
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/audio_pull_fifo.h"
@@ -55,11 +55,11 @@ AUHALStream::AUHALStream(
// We must have a manager.
DCHECK(manager_);
- VLOG(1) << "AUHALStream::AUHALStream()";
- VLOG(1) << "Device: " << device;
- VLOG(1) << "Output channels: " << output_channels_;
- VLOG(1) << "Sample rate: " << params_.sample_rate();
- VLOG(1) << "Buffer size: " << number_of_frames_;
+ DVLOG(1) << "AUHALStream::AUHALStream()";
+ DVLOG(1) << "Device: " << device;
+ DVLOG(1) << "Output channels: " << output_channels_;
+ DVLOG(1) << "Sample rate: " << params_.sample_rate();
+ DVLOG(1) << "Buffer size: " << number_of_frames_;
}
AUHALStream::~AUHALStream() {
@@ -193,8 +193,9 @@ OSStatus AUHALStream::Render(
if (number_of_frames != number_of_frames_) {
// Create a FIFO on the fly to handle any discrepancies in callback rates.
if (!audio_fifo_) {
- VLOG(1) << "Audio frame size changed from " << number_of_frames_ << " to "
- << number_of_frames << "; adding FIFO to compensate.";
+ DVLOG(1) << "Audio frame size changed from " << number_of_frames_
+ << " to " << number_of_frames
+ << "; adding FIFO to compensate.";
audio_fifo_.reset(new AudioPullFifo(
output_channels_,
number_of_frames_,
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
index 69179d56078..b9c857d706c 100644
--- a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -9,6 +9,7 @@
#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -39,16 +40,14 @@ class AUHALStreamTest : public testing::Test {
base::RunLoop().RunUntilIdle();
}
- virtual ~AUHALStreamTest() {
- base::RunLoop().RunUntilIdle();
- }
+ ~AUHALStreamTest() override { base::RunLoop().RunUntilIdle(); }
AudioOutputStream* Create() {
return manager_->MakeAudioOutputStream(
manager_->GetDefaultOutputStreamParameters(), "");
}
- bool CanRunAudioTests() {
+ bool OutputDevicesAvailable() {
return manager_->HasAudioOutputDevices();
}
@@ -62,8 +61,7 @@ class AUHALStreamTest : public testing::Test {
};
TEST_F(AUHALStreamTest, HardwareSampleRate) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
const AudioParameters preferred_params =
manager_->GetDefaultOutputStreamParameters();
EXPECT_GE(preferred_params.sample_rate(), 16000);
@@ -71,22 +69,19 @@ TEST_F(AUHALStreamTest, HardwareSampleRate) {
}
TEST_F(AUHALStreamTest, CreateClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
Create()->Close();
}
TEST_F(AUHALStreamTest, CreateOpenClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
AudioOutputStream* stream = Create();
EXPECT_TRUE(stream->Open());
stream->Close();
}
TEST_F(AUHALStreamTest, CreateOpenStartStopClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(OutputDevicesAvailable());
AudioOutputStream* stream = Create();
EXPECT_TRUE(stream->Open());
diff --git a/chromium/media/audio/mac/audio_device_listener_mac.cc b/chromium/media/audio/mac/audio_device_listener_mac.cc
index ef8bdd5b96c..5054d310d00 100644
--- a/chromium/media/audio/mac/audio_device_listener_mac.cc
+++ b/chromium/media/audio/mac/audio_device_listener_mac.cc
@@ -8,7 +8,6 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "base/mac/mac_util.h"
#include "base/message_loop/message_loop.h"
#include "base/pending_task.h"
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
index af5d9bc16e5..bc443866ddf 100644
--- a/chromium/media/audio/mac/audio_input_mac.cc
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -141,8 +141,9 @@ bool PCMQueueInAudioInputStream::IsMuted() {
return false;
}
-void PCMQueueInAudioInputStream::SetAutomaticGainControl(bool enabled) {
+bool PCMQueueInAudioInputStream::SetAutomaticGainControl(bool enabled) {
NOTREACHED() << "Only supported for low-latency mode.";
+ return false;
}
bool PCMQueueInAudioInputStream::GetAutomaticGainControl() {
diff --git a/chromium/media/audio/mac/audio_input_mac.h b/chromium/media/audio/mac/audio_input_mac.h
index 56453d2eddb..022033439bf 100644
--- a/chromium/media/audio/mac/audio_input_mac.h
+++ b/chromium/media/audio/mac/audio_input_mac.h
@@ -36,7 +36,7 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
double GetMaxVolume() override;
void SetVolume(double volume) override;
double GetVolume() override;
- void SetAutomaticGainControl(bool enabled) override;
+ bool SetAutomaticGainControl(bool enabled) override;
bool GetAutomaticGainControl() override;
bool IsMuted() override;
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index fdd072ac3f2..e5e51bb3404 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -19,6 +19,10 @@ namespace media {
// Number of blocks of buffers used in the |fifo_|.
const int kNumberOfBlocksBufferInFifo = 2;
+// Max length of sequence of TooManyFramesToProcessError errors.
+// The stream will be stopped as soon as this time limit is passed.
+const int kMaxErrorTimeoutInSeconds = 1;
+
static std::ostream& operator<<(std::ostream& os,
const AudioStreamBasicDescription& format) {
os << "sample rate : " << format.mSampleRate << std::endl
@@ -241,6 +245,7 @@ void AUAudioInputStream::Start(AudioInputCallback* callback) {
}
sink_ = callback;
+ last_success_time_ = base::TimeTicks::Now();
StartAgc();
OSStatus result = AudioOutputUnitStart(audio_unit_);
if (result == noErr) {
@@ -496,8 +501,29 @@ OSStatus AUAudioInputStream::InputProc(void* user_data,
if (result) {
UMA_HISTOGRAM_SPARSE_SLOWLY("Media.AudioInputCbErrorMac", result);
OSSTATUS_DLOG(ERROR, result) << "AudioUnitRender() failed ";
+ if (result != kAudioUnitErr_TooManyFramesToProcess) {
+ audio_input->HandleError(result);
+ } else {
+ DCHECK(!audio_input->last_success_time_.is_null());
+ // We delay stopping the stream for kAudioUnitErr_TooManyFramesToProcess
+ // since it has been observed that some USB headsets can cause this error
+ // but only for a few initial frames at startup and then then the stream
+ // returns to a stable state again. See b/19524368 for details.
+ // Instead, we measure time since last valid audio frame and call
+ // HandleError() only if a too long error sequence is detected. We do
+ // this to avoid ending up in a non recoverable bad core audio state.
+ base::TimeDelta time_since_last_success =
+ base::TimeTicks::Now() - audio_input->last_success_time_;
+ if ((time_since_last_success >
+ base::TimeDelta::FromSeconds(kMaxErrorTimeoutInSeconds))) {
+ DLOG(ERROR) << "Too long sequence of TooManyFramesToProcess errors!";
+ audio_input->HandleError(result);
+ }
+ }
return result;
}
+ // Update time of successful call to AudioUnitRender().
+ audio_input->last_success_time_ = base::TimeTicks::Now();
// Deliver recorded data to the consumer as a callback.
return audio_input->Provide(number_of_frames,
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
index 15ee2431ea2..abbac4b332a 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.h
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -42,6 +42,7 @@
#include "base/cancelable_callback.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
+#include "base/time/time.h"
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -157,6 +158,12 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// Used to defer Start() to workaround http://crbug.com/160920.
base::CancelableClosure deferred_start_cb_;
+ // Contains time of last successful call to AudioUnitRender().
+ // Initialized first time in Start() and then updated for each valid
+ // audio buffer. Used to detect long error sequences and to take actions
+ // if length of error sequence is above a certain limit.
+ base::TimeTicks last_success_time_;
+
DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
};
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
index 7960b42c646..b1a8b3b69ac 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
@@ -10,6 +10,7 @@
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -110,17 +111,10 @@ class MacAudioInputTest : public testing::Test {
base::RunLoop().RunUntilIdle();
}
- virtual ~MacAudioInputTest() {
- base::RunLoop().RunUntilIdle();
- }
+ ~MacAudioInputTest() override { base::RunLoop().RunUntilIdle(); }
- // Convenience method which ensures that we are not running on the build
- // bots and that at least one valid input device can be found.
- bool CanRunAudioTests() {
- bool has_input = audio_manager_->HasAudioInputDevices();
- if (!has_input)
- LOG(WARNING) << "No input devices detected";
- return has_input;
+ bool InputDevicesAvailable() {
+ return audio_manager_->HasAudioInputDevices();
}
// Convenience method which creates a default AudioInputStream object using
@@ -156,16 +150,14 @@ class MacAudioInputTest : public testing::Test {
// Test Create(), Close().
TEST_F(MacAudioInputTest, AUAudioInputStreamCreateAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioInputStream* ais = CreateDefaultAudioInputStream();
ais->Close();
}
// Test Open(), Close().
TEST_F(MacAudioInputTest, AUAudioInputStreamOpenAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioInputStream* ais = CreateDefaultAudioInputStream();
EXPECT_TRUE(ais->Open());
ais->Close();
@@ -173,8 +165,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenAndClose) {
// Test Open(), Start(), Close().
TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioInputStream* ais = CreateDefaultAudioInputStream();
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
@@ -184,8 +175,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
// Test Open(), Start(), Stop(), Close().
TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioInputStream* ais = CreateDefaultAudioInputStream();
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
@@ -196,8 +186,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
// Test some additional calling sequences.
TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
AudioInputStream* ais = CreateDefaultAudioInputStream();
AUAudioInputStream* auais = static_cast<AUAudioInputStream*>(ais);
@@ -224,8 +213,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
// Verify that recording starts and stops correctly in mono using mocked sink.
TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
int count = 0;
@@ -251,8 +239,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
// Verify that recording starts and stops correctly in mono using mocked sink.
TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
int count = 0;
@@ -290,8 +277,7 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
// environment variable to a value greater than 0.
TEST_F(MacAudioInputTest, DISABLED_AUAudioInputStreamRecordToFile) {
- if (!CanRunAudioTests())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(InputDevicesAvailable());
const char* file_name = "out_stereo_10sec.pcm";
int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index b3d5978f044..42651ffbea8 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -4,8 +4,6 @@
#include "media/audio/mac/audio_manager_mac.h"
-#include <CoreAudio/AudioHardware.h>
-#include <string>
#include "base/bind.h"
#include "base/command_line.h"
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
index dfd07fcee6a..e737c437c9c 100644
--- a/chromium/media/audio/null_audio_sink.cc
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -6,7 +6,7 @@
#include "base/bind.h"
#include "base/single_thread_task_runner.h"
-#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/fake_audio_worker.h"
#include "media/base/audio_hash.h"
namespace media {
@@ -24,7 +24,8 @@ NullAudioSink::~NullAudioSink() {}
void NullAudioSink::Initialize(const AudioParameters& params,
RenderCallback* callback) {
DCHECK(!initialized_);
- fake_consumer_.reset(new FakeAudioConsumer(task_runner_, params));
+ fake_worker_.reset(new FakeAudioWorker(task_runner_, params));
+ audio_bus_ = AudioBus::Create(params);
callback_ = callback;
initialized_ = true;
}
@@ -38,8 +39,8 @@ void NullAudioSink::Stop() {
DCHECK(task_runner_->BelongsToCurrentThread());
// Stop may be called at any time, so we have to check before stopping.
- if (fake_consumer_)
- fake_consumer_->Stop();
+ if (fake_worker_)
+ fake_worker_->Stop();
}
void NullAudioSink::Play() {
@@ -49,7 +50,7 @@ void NullAudioSink::Play() {
if (playing_)
return;
- fake_consumer_->Start(base::Bind(
+ fake_worker_->Start(base::Bind(
&NullAudioSink::CallRender, base::Unretained(this)));
playing_ = true;
}
@@ -60,7 +61,7 @@ void NullAudioSink::Pause() {
if (!playing_)
return;
- fake_consumer_->Stop();
+ fake_worker_->Stop();
playing_ = false;
}
@@ -69,14 +70,14 @@ bool NullAudioSink::SetVolume(double volume) {
return volume == 0.0;
}
-void NullAudioSink::CallRender(AudioBus* audio_bus) {
+void NullAudioSink::CallRender() {
DCHECK(task_runner_->BelongsToCurrentThread());
- int frames_received = callback_->Render(audio_bus, 0);
+ int frames_received = callback_->Render(audio_bus_.get(), 0);
if (!audio_hash_ || frames_received <= 0)
return;
- audio_hash_->Update(audio_bus, frames_received);
+ audio_hash_->Update(audio_bus_.get(), frames_received);
}
void NullAudioSink::StartAudioHashForTesting() {
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
index d261602c0ec..1abdb996b5c 100644
--- a/chromium/media/audio/null_audio_sink.h
+++ b/chromium/media/audio/null_audio_sink.h
@@ -17,7 +17,7 @@ class SingleThreadTaskRunner;
namespace media {
class AudioBus;
class AudioHash;
-class FakeAudioConsumer;
+class FakeAudioWorker;
class MEDIA_EXPORT NullAudioSink
: NON_EXPORTED_BASE(public AudioRendererSink) {
@@ -44,7 +44,7 @@ class MEDIA_EXPORT NullAudioSink
private:
// Task that periodically calls Render() to consume audio data.
- void CallRender(AudioBus* audio_bus);
+ void CallRender();
bool initialized_;
bool playing_;
@@ -54,7 +54,8 @@ class MEDIA_EXPORT NullAudioSink
scoped_ptr<AudioHash> audio_hash_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_ptr<FakeAudioConsumer> fake_consumer_;
+ scoped_ptr<FakeAudioWorker> fake_worker_;
+ scoped_ptr<AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
};
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
index 5bd7518c81d..3326952bb04 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -17,26 +17,26 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
AudioManagerOpenBSD(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() override;
- virtual bool HasAudioInputDevices() override;
- virtual AudioParameters GetInputStreamParameters(
+ bool HasAudioOutputDevices() override;
+ bool HasAudioInputDevices() override;
+ AudioParameters GetInputStreamParameters(
const std::string& device_id) override;
// Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
+ AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) override;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeLinearInputStream(
+ AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) override;
- virtual AudioInputStream* MakeLowLatencyInputStream(
+ AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) override;
protected:
- virtual ~AudioManagerOpenBSD();
+ ~AudioManagerOpenBSD() override;
- virtual AudioParameters GetPreferredOutputStreamParameters(
+ AudioParameters GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) override;
diff --git a/chromium/media/audio/pulse/pulse.sigs b/chromium/media/audio/pulse/pulse.sigs
index 8d2dab70c60..b9c80882ce1 100644
--- a/chromium/media/audio/pulse/pulse.sigs
+++ b/chromium/media/audio/pulse/pulse.sigs
@@ -40,6 +40,10 @@ uint32_t pa_stream_get_device_index(pa_stream* s);
int pa_stream_get_latency(pa_stream* s, pa_usec_t* r_usec, int* negative);
pa_stream_state_t pa_stream_get_state(pa_stream* p);
pa_stream* pa_stream_new(pa_context* c, const char* name, const pa_sample_spec* ss, const pa_channel_map * map);
+pa_stream* pa_stream_new_with_proplist(pa_context* c, const char* name, const pa_sample_spec* ss, const pa_channel_map* map, pa_proplist* p);
+pa_proplist* pa_proplist_new(void);
+void pa_proplist_free(pa_proplist* p);
+int pa_proplist_sets(pa_proplist* p, const char* key, const char* value);
size_t pa_stream_readable_size(pa_stream *p);
int pa_stream_peek(pa_stream* p, const void** data, size_t* nbytes);
void pa_stream_set_read_callback(pa_stream* p, pa_stream_request_cb_t cb, void* userdata);
diff --git a/chromium/media/audio/pulse/pulse_input.cc b/chromium/media/audio/pulse/pulse_input.cc
index 7e2ca22a7a5..3d3c770ddaf 100644
--- a/chromium/media/audio/pulse/pulse_input.cc
+++ b/chromium/media/audio/pulse/pulse_input.cc
@@ -34,8 +34,7 @@ PulseAudioInputStream::PulseAudioInputStream(AudioManagerPulse* audio_manager,
kNumberOfBlocksBufferInFifo),
pa_mainloop_(mainloop),
pa_context_(context),
- handle_(NULL),
- context_state_changed_(false) {
+ handle_(NULL) {
DCHECK(mainloop);
DCHECK(context);
CHECK(params_.IsValid());
@@ -79,7 +78,8 @@ void PulseAudioInputStream::Start(AudioInputCallback* callback) {
pa_stream_readable_size(handle_);
stream_started_ = true;
- pa_operation* operation = pa_stream_cork(handle_, 0, NULL, NULL);
+ pa_operation* operation =
+ pa_stream_cork(handle_, 0, &pulse::StreamSuccessCallback, pa_mainloop_);
WaitForOperationCompletion(pa_mainloop_, operation);
}
diff --git a/chromium/media/audio/pulse/pulse_input.h b/chromium/media/audio/pulse/pulse_input.h
index e3316cd7c53..b21655b2fde 100644
--- a/chromium/media/audio/pulse/pulse_input.h
+++ b/chromium/media/audio/pulse/pulse_input.h
@@ -76,9 +76,6 @@ class PulseAudioInputStream : public AgcAudioStream<AudioInputStream> {
pa_context* pa_context_; // Weak.
pa_stream* handle_;
- // Flag indicating the state of the context has been changed.
- bool context_state_changed_;
-
base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
index 1048113cf25..5168f356a3c 100644
--- a/chromium/media/audio/pulse/pulse_output.cc
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -8,7 +8,6 @@
#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_util.h"
namespace media {
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
index c06195eb198..7e579d552c9 100644
--- a/chromium/media/audio/pulse/pulse_util.cc
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -15,6 +15,12 @@ namespace pulse {
namespace {
+#if defined(GOOGLE_CHROME_BUILD)
+static const char kBrowserDisplayName[] = "google-chrome";
+#else
+static const char kBrowserDisplayName[] = "chromium-browser";
+#endif
+
pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
switch (channel) {
// PulseAudio does not differentiate between left/right and
@@ -47,6 +53,18 @@ pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
}
}
+class ScopedPropertyList {
+ public:
+ ScopedPropertyList() : property_list_(pa_proplist_new()) {}
+ ~ScopedPropertyList() { pa_proplist_free(property_list_); }
+
+ pa_proplist* get() const { return property_list_; }
+
+ private:
+ pa_proplist* property_list_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedPropertyList);
+};
+
} // namespace
// static, pa_stream_success_cb_t
@@ -157,8 +175,14 @@ bool CreateInputStream(pa_threaded_mainloop* mainloop,
pa_channel_map* map = (source_channel_map.channels != 0) ?
&source_channel_map : NULL;
- // Create a new recording stream.
- *stream = pa_stream_new(context, "RecordStream", &sample_specifications, map);
+ // Create a new recording stream and
+ // tells PulseAudio what the stream icon should be.
+ ScopedPropertyList property_list;
+ pa_proplist_sets(property_list.get(), PA_PROP_APPLICATION_ICON_NAME,
+ kBrowserDisplayName);
+ *stream = pa_stream_new_with_proplist(context, "RecordStream",
+ &sample_specifications, map,
+ property_list.get());
RETURN_ON_FAILURE(*stream, "failed to create PA recording stream");
pa_stream_set_state_callback(*stream, stream_callback, user_data);
@@ -250,7 +274,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
sample_specifications.rate = params.sample_rate();
sample_specifications.channels = params.channels();
- // Get channel mapping and open playback stream.
+ // Get channel mapping.
pa_channel_map* map = NULL;
pa_channel_map source_channel_map = ChannelLayoutToPAChannelMap(
params.channel_layout());
@@ -259,7 +283,14 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
// than the default channel map (NULL).
map = &source_channel_map;
}
- *stream = pa_stream_new(*context, "Playback", &sample_specifications, map);
+
+ // Open playback stream and
+ // tell PulseAudio what the stream icon should be.
+ ScopedPropertyList property_list;
+ pa_proplist_sets(property_list.get(), PA_PROP_APPLICATION_ICON_NAME,
+ kBrowserDisplayName);
+ *stream = pa_stream_new_with_proplist(
+ *context, "Playback", &sample_specifications, map, property_list.get());
RETURN_ON_FAILURE(*stream, "failed to create PA playback stream");
pa_stream_set_state_callback(*stream, stream_callback, user_data);
diff --git a/chromium/media/audio/simple_sources.cc b/chromium/media/audio/simple_sources.cc
index 28a0f5f881b..877f9fd5bda 100644
--- a/chromium/media/audio/simple_sources.cc
+++ b/chromium/media/audio/simple_sources.cc
@@ -9,10 +9,99 @@
#include <algorithm>
+#include "base/files/file.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "media/audio/sounds/wav_audio_handler.h"
+#include "media/base/audio_bus.h"
namespace media {
+// Opens |wav_filename|, reads it and loads it as a wav file. This function will
+// return a null pointer if we can't read the file or if it's malformed. The
+// caller takes ownership of the returned data. The size of the data is stored
+// in |read_length|.
+static scoped_ptr<uint8[]> ReadWavFile(const base::FilePath& wav_filename,
+ size_t* file_length) {
+ base::File wav_file(
+ wav_filename, base::File::FLAG_OPEN | base::File::FLAG_READ);
+ if (!wav_file.IsValid()) {
+ LOG(ERROR) << "Failed to read " << wav_filename.value()
+ << " as input to the fake device.";
+ return nullptr;
+ }
+
+ size_t wav_file_length = wav_file.GetLength();
+ if (wav_file_length == 0u) {
+ LOG(ERROR) << "Input file to fake device is empty: "
+ << wav_filename.value();
+ return nullptr;
+ }
+
+ uint8* wav_file_data = new uint8[wav_file_length];
+ size_t read_bytes = wav_file.Read(0, reinterpret_cast<char*>(wav_file_data),
+ wav_file_length);
+ if (read_bytes != wav_file_length) {
+ LOG(ERROR) << "Failed to read all bytes of " << wav_filename.value();
+ return nullptr;
+ }
+ *file_length = wav_file_length;
+ return scoped_ptr<uint8[]>(wav_file_data);
+}
+
+// Opens |wav_filename|, reads it and loads it as a wav file. This function will
+// bluntly trigger CHECKs if we can't read the file or if it's malformed.
+static scoped_ptr<WavAudioHandler> CreateWavAudioHandler(
+ const base::FilePath& wav_filename, const uint8* wav_file_data,
+ size_t wav_file_length, const AudioParameters& expected_params) {
+ base::StringPiece wav_data(reinterpret_cast<const char*>(wav_file_data),
+ wav_file_length);
+ scoped_ptr<WavAudioHandler> wav_audio_handler(new WavAudioHandler(wav_data));
+ return wav_audio_handler.Pass();
+}
+
+// These values are based on experiments for local-to-local
+// PeerConnection to demonstrate audio/video synchronization.
+static const int kBeepDurationMilliseconds = 20;
+static const int kBeepFrequency = 400;
+
+// Intervals between two automatic beeps.
+static const int kAutomaticBeepIntervalInMs = 500;
+
+// Automatic beep will be triggered every |kAutomaticBeepIntervalInMs| unless
+// users explicitly call BeepOnce(), which will disable the automatic beep.
+class BeepContext {
+ public:
+ BeepContext() : beep_once_(false), automatic_beep_(true) {}
+
+ void SetBeepOnce(bool enable) {
+ base::AutoLock auto_lock(lock_);
+ beep_once_ = enable;
+
+ // Disable the automatic beep if users explicit set |beep_once_| to true.
+ if (enable)
+ automatic_beep_ = false;
+ }
+
+ bool beep_once() const {
+ base::AutoLock auto_lock(lock_);
+ return beep_once_;
+ }
+
+ bool automatic_beep() const {
+ base::AutoLock auto_lock(lock_);
+ return automatic_beep_;
+ }
+
+ private:
+ mutable base::Lock lock_;
+ bool beep_once_;
+ bool automatic_beep_;
+};
+
+static base::LazyInstance<BeepContext>::Leaky g_beep_context =
+ LAZY_INSTANCE_INITIALIZER;
+
//////////////////////////////////////////////////////////////////////////////
// SineWaveAudioSource implementation.
@@ -26,6 +115,9 @@ SineWaveAudioSource::SineWaveAudioSource(int channels,
errors_(0) {
}
+SineWaveAudioSource::~SineWaveAudioSource() {
+}
+
// The implementation could be more efficient if a lookup table is constructed
// but it is efficient enough for our simple needs.
int SineWaveAudioSource::OnMoreData(AudioBus* audio_bus,
@@ -63,4 +155,150 @@ void SineWaveAudioSource::Reset() {
time_state_ = 0;
}
+FileSource::FileSource(const AudioParameters& params,
+ const base::FilePath& path_to_wav_file)
+ : params_(params),
+ path_to_wav_file_(path_to_wav_file),
+ wav_file_read_pos_(0),
+ load_failed_(false) {
+}
+
+FileSource::~FileSource() {
+}
+
+void FileSource::LoadWavFile(const base::FilePath& path_to_wav_file) {
+ // Don't try again if we already failed.
+ if (load_failed_)
+ return;
+
+ // Read the file, and put its data in a scoped_ptr so it gets deleted later.
+ size_t file_length = 0;
+ wav_file_data_ = ReadWavFile(path_to_wav_file, &file_length);
+ if (!wav_file_data_) {
+ load_failed_ = true;
+ return;
+ }
+
+ wav_audio_handler_ = CreateWavAudioHandler(
+ path_to_wav_file, wav_file_data_.get(), file_length, params_);
+
+ // Hook us up so we pull in data from the file into the converter. We need to
+ // modify the wav file's audio parameters since we'll be reading small slices
+ // of it at a time and not the whole thing (like 10 ms at a time).
+ AudioParameters file_audio_slice(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(wav_audio_handler_->num_channels()),
+ wav_audio_handler_->sample_rate(), wav_audio_handler_->bits_per_sample(),
+ params_.frames_per_buffer());
+
+ file_audio_converter_.reset(
+ new AudioConverter(file_audio_slice, params_, false));
+ file_audio_converter_->AddInput(this);
+}
+
+int FileSource::OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) {
+ // Load the file if we haven't already. This load needs to happen on the
+ // audio thread, otherwise we'll run on the UI thread on Mac for instance.
+ // This will massively delay the first OnMoreData, but we'll catch up.
+ if (!wav_audio_handler_)
+ LoadWavFile(path_to_wav_file_);
+ if (load_failed_)
+ return 0;
+
+ DCHECK(wav_audio_handler_.get());
+
+ // Stop playing if we've played out the whole file.
+ if (wav_audio_handler_->AtEnd(wav_file_read_pos_))
+ return 0;
+
+ // This pulls data from ProvideInput.
+ file_audio_converter_->Convert(audio_bus);
+ return audio_bus->frames();
+}
+
+double FileSource::ProvideInput(AudioBus* audio_bus_into_converter,
+ base::TimeDelta buffer_delay) {
+ // Unfilled frames will be zeroed by CopyTo.
+ size_t bytes_written;
+ wav_audio_handler_->CopyTo(audio_bus_into_converter, wav_file_read_pos_,
+ &bytes_written);
+ wav_file_read_pos_ += bytes_written;
+ return 1.0;
+}
+
+void FileSource::OnError(AudioOutputStream* stream) {
+}
+
+BeepingSource::BeepingSource(const AudioParameters& params)
+ : buffer_size_(params.GetBytesPerBuffer()),
+ buffer_(new uint8[buffer_size_]),
+ params_(params),
+ last_callback_time_(base::TimeTicks::Now()),
+ beep_duration_in_buffers_(kBeepDurationMilliseconds *
+ params.sample_rate() /
+ params.frames_per_buffer() /
+ 1000),
+ beep_generated_in_buffers_(0),
+ beep_period_in_frames_(params.sample_rate() / kBeepFrequency) {
+}
+
+BeepingSource::~BeepingSource() {
+}
+
+int BeepingSource::OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) {
+ // Accumulate the time from the last beep.
+ interval_from_last_beep_ += base::TimeTicks::Now() - last_callback_time_;
+
+ memset(buffer_.get(), 0, buffer_size_);
+ bool should_beep = false;
+ BeepContext* beep_context = g_beep_context.Pointer();
+ if (beep_context->automatic_beep()) {
+ base::TimeDelta delta = interval_from_last_beep_ -
+ base::TimeDelta::FromMilliseconds(kAutomaticBeepIntervalInMs);
+ if (delta > base::TimeDelta()) {
+ should_beep = true;
+ interval_from_last_beep_ = delta;
+ }
+ } else {
+ should_beep = beep_context->beep_once();
+ beep_context->SetBeepOnce(false);
+ }
+
+ // If this object was instructed to generate a beep or has started to
+ // generate a beep sound.
+ if (should_beep || beep_generated_in_buffers_) {
+ // Compute the number of frames to output high value. Then compute the
+ // number of bytes based on channels and bits per channel.
+ int high_frames = beep_period_in_frames_ / 2;
+ int high_bytes = high_frames * params_.bits_per_sample() *
+ params_.channels() / 8;
+
+ // Separate high and low with the same number of bytes to generate a
+ // square wave.
+ int position = 0;
+ while (position + high_bytes <= buffer_size_) {
+ // Write high values first.
+ memset(buffer_.get() + position, 128, high_bytes);
+ // Then leave low values in the buffer with |high_bytes|.
+ position += high_bytes * 2;
+ }
+
+ ++beep_generated_in_buffers_;
+ if (beep_generated_in_buffers_ >= beep_duration_in_buffers_)
+ beep_generated_in_buffers_ = 0;
+ }
+
+ last_callback_time_ = base::TimeTicks::Now();
+ audio_bus->FromInterleaved(
+ buffer_.get(), audio_bus->frames(), params_.bits_per_sample() / 8);
+ return audio_bus->frames();
+}
+
+void BeepingSource::OnError(AudioOutputStream* stream) {
+}
+
+void BeepingSource::BeepOnce() {
+ g_beep_context.Pointer()->SetBeepOnce(true);
+}
+
} // namespace media
diff --git a/chromium/media/audio/simple_sources.h b/chromium/media/audio/simple_sources.h
index 7a2176f6bc2..dcb04fd8453 100644
--- a/chromium/media/audio/simple_sources.h
+++ b/chromium/media/audio/simple_sources.h
@@ -5,12 +5,16 @@
#ifndef MEDIA_AUDIO_SIMPLE_SOURCES_H_
#define MEDIA_AUDIO_SIMPLE_SOURCES_H_
+#include "base/files/file_path.h"
#include "base/synchronization/lock.h"
#include "media/audio/audio_io.h"
+#include "media/base/audio_converter.h"
#include "media/base/seekable_buffer.h"
namespace media {
+class WavAudioHandler;
+
// An audio source that produces a pure sinusoidal tone.
class MEDIA_EXPORT SineWaveAudioSource
: public AudioOutputStream::AudioSourceCallback {
@@ -19,7 +23,7 @@ class MEDIA_EXPORT SineWaveAudioSource
// hertz and it has to be less than half of the sampling frequency
// |sample_freq| or else you will get aliasing.
SineWaveAudioSource(int channels, double freq, double sample_freq);
- ~SineWaveAudioSource() override {}
+ ~SineWaveAudioSource() override;
// Return up to |cap| samples of data via OnMoreData(). Use Reset() to
// allow more data to be served.
@@ -44,6 +48,55 @@ class MEDIA_EXPORT SineWaveAudioSource
base::Lock time_lock_;
};
+class FileSource : public AudioOutputStream::AudioSourceCallback,
+ public AudioConverter::InputCallback {
+ public:
+ FileSource(const AudioParameters& params,
+ const base::FilePath& path_to_wav_file);
+ ~FileSource() override;
+
+ // Implementation of AudioSourceCallback.
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override;
+ void OnError(AudioOutputStream* stream) override;
+
+ private:
+ AudioParameters params_;
+ base::FilePath path_to_wav_file_;
+ scoped_ptr<uint8[]> wav_file_data_;
+ scoped_ptr<WavAudioHandler> wav_audio_handler_;
+ scoped_ptr<AudioConverter> file_audio_converter_;
+ int wav_file_read_pos_;
+ bool load_failed_;
+
+ // Provides audio data from wav_audio_handler_ into the file audio converter.
+ double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) override;
+
+ // Loads the wav file on the first OnMoreData invocation.
+ void LoadWavFile(const base::FilePath& path_to_wav_file);
+};
+
+class BeepingSource : public AudioOutputStream::AudioSourceCallback {
+ public:
+ BeepingSource(const AudioParameters& params);
+ ~BeepingSource() override;
+
+ // Implementation of AudioSourceCallback.
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override;
+ void OnError(AudioOutputStream* stream) override;
+
+ static void BeepOnce();
+ private:
+ int buffer_size_;
+ scoped_ptr<uint8[]> buffer_;
+ AudioParameters params_;
+ base::TimeTicks last_callback_time_;
+ base::TimeDelta interval_from_last_beep_;
+ int beep_duration_in_buffers_;
+ int beep_generated_in_buffers_;
+ int beep_period_in_frames_;
+};
+
} // namespace media
#endif // MEDIA_AUDIO_SIMPLE_SOURCES_H_
diff --git a/chromium/media/audio/sounds/audio_stream_handler.cc b/chromium/media/audio/sounds/audio_stream_handler.cc
index ac9c87bcfaf..28758b23713 100644
--- a/chromium/media/audio/sounds/audio_stream_handler.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler.cc
@@ -36,7 +36,7 @@ AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
class AudioStreamHandler::AudioStreamContainer
: public AudioOutputStream::AudioSourceCallback {
public:
- AudioStreamContainer(const WavAudioHandler& wav_audio)
+ explicit AudioStreamContainer(const WavAudioHandler& wav_audio)
: started_(false),
stream_(NULL),
cursor_(0),
@@ -51,12 +51,11 @@ class AudioStreamHandler::AudioStreamContainer
DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
if (!stream_) {
- const AudioParameters& p = wav_audio_.params();
- const AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- p.channel_layout(),
- p.sample_rate(),
- p.bits_per_sample(),
- kDefaultFrameCount);
+ const AudioParameters params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(wav_audio_.num_channels()),
+ wav_audio_.sample_rate(), wav_audio_.bits_per_sample(),
+ kDefaultFrameCount);
stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params,
std::string());
if (!stream_ || !stream_->Open()) {
@@ -162,7 +161,11 @@ AudioStreamHandler::AudioStreamHandler(const base::StringPiece& wav_data)
LOG(ERROR) << "Can't get access to audio manager.";
return;
}
- if (!wav_audio_.params().IsValid()) {
+ const AudioParameters params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(wav_audio_.num_channels()), wav_audio_.sample_rate(),
+ wav_audio_.bits_per_sample(), kDefaultFrameCount);
+ if (!params.IsValid()) {
LOG(ERROR) << "Audio params are invalid.";
return;
}
diff --git a/chromium/media/audio/sounds/sounds_manager.cc b/chromium/media/audio/sounds/sounds_manager.cc
index 58ca2387e25..72a7a3b15c1 100644
--- a/chromium/media/audio/sounds/sounds_manager.cc
+++ b/chromium/media/audio/sounds/sounds_manager.cc
@@ -73,7 +73,7 @@ base::TimeDelta SoundsManagerImpl::GetDuration(SoundKey key) {
return base::TimeDelta();
}
const WavAudioHandler& wav_audio = handlers_[key]->wav_audio_handler();
- return wav_audio.params().GetBufferDuration();
+ return wav_audio.GetDuration();
}
} // namespace
diff --git a/chromium/media/audio/sounds/wav_audio_handler.cc b/chromium/media/audio/sounds/wav_audio_handler.cc
index b87baa8fd3f..c9808394aea 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler.cc
@@ -55,9 +55,7 @@ T ReadInt(const base::StringPiece& data, size_t offset) {
namespace media {
WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
- : num_channels_(0),
- sample_rate_(0),
- bits_per_sample_(0) {
+ : num_channels_(0), sample_rate_(0), bits_per_sample_(0), total_frames_(0) {
CHECK_LE(kWavFileHeaderSize, wav_data.size()) << "wav data is too small";
CHECK(wav_data.starts_with(kChunkId) &&
memcmp(wav_data.data() + 8, kFormat, 4) == 0)
@@ -72,12 +70,7 @@ WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
offset += length;
}
- const int frame_count = data_.size() * 8 / num_channels_ / bits_per_sample_;
- params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- GuessChannelLayout(num_channels_),
- sample_rate_,
- bits_per_sample_,
- frame_count);
+ total_frames_ = data_.size() * 8 / num_channels_ / bits_per_sample_;
}
WavAudioHandler::~WavAudioHandler() {}
@@ -91,24 +84,29 @@ bool WavAudioHandler::CopyTo(AudioBus* bus,
size_t* bytes_written) const {
if (!bus)
return false;
- if (bus->channels() != params_.channels()) {
- DVLOG(1) << "Number of channel mismatch.";
+ if (bus->channels() != num_channels_) {
+ DVLOG(1) << "Number of channels mismatch.";
return false;
}
if (AtEnd(cursor)) {
bus->Zero();
return true;
}
- const int remaining_frames =
- (data_.size() - cursor) / params_.GetBytesPerFrame();
+ const int bytes_per_frame = num_channels_ * bits_per_sample_ / 8;
+ const int remaining_frames = (data_.size() - cursor) / bytes_per_frame;
const int frames = std::min(bus->frames(), remaining_frames);
- bus->FromInterleaved(data_.data() + cursor, frames,
- params_.bits_per_sample() / 8);
- *bytes_written = frames * params_.GetBytesPerFrame();
+
+ bus->FromInterleaved(data_.data() + cursor, frames, bits_per_sample_ / 8);
+ *bytes_written = frames * bytes_per_frame;
bus->ZeroFramesPartial(frames, bus->frames() - frames);
return true;
}
+base::TimeDelta WavAudioHandler::GetDuration() const {
+ return base::TimeDelta::FromSecondsD(total_frames_ /
+ static_cast<double>(sample_rate_));
+}
+
int WavAudioHandler::ParseSubChunk(const base::StringPiece& data) {
if (data.size() < kChunkHeaderSize)
return data.size();
diff --git a/chromium/media/audio/sounds/wav_audio_handler.h b/chromium/media/audio/sounds/wav_audio_handler.h
index 82b5cc5f842..6e404faba18 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.h
+++ b/chromium/media/audio/sounds/wav_audio_handler.h
@@ -7,7 +7,6 @@
#include "base/strings/string_piece.h"
#include "base/time/time.h"
-#include "media/audio/audio_parameters.h"
#include "media/base/media_export.h"
namespace media {
@@ -29,8 +28,15 @@ class MEDIA_EXPORT WavAudioHandler {
// |bytes_written|. |bytes_written| should not be NULL.
bool CopyTo(AudioBus* bus, size_t cursor, size_t* bytes_written) const;
- const AudioParameters& params() const { return params_; }
+ // Accessors.
const base::StringPiece& data() const { return data_; }
+ uint16_t num_channels() const { return num_channels_; }
+ uint32_t sample_rate() const { return sample_rate_; }
+ uint16_t bits_per_sample() const { return bits_per_sample_; }
+ uint32_t total_frames() const { return total_frames_; }
+
+ // Returns the duration of the entire audio chunk.
+ base::TimeDelta GetDuration() const;
private:
// Parses a chunk of wav format data. Returns the length of the chunk.
@@ -45,11 +51,10 @@ class MEDIA_EXPORT WavAudioHandler {
// Data part of the |wav_data_|.
base::StringPiece data_;
- AudioParameters params_;
-
- uint16 num_channels_;
- uint32 sample_rate_;
- uint16 bits_per_sample_;
+ uint16_t num_channels_;
+ uint32_t sample_rate_;
+ uint16_t bits_per_sample_;
+ uint32_t total_frames_;
};
} // namespace media
diff --git a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
index 6098b9399e0..d4ec83a5838 100644
--- a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
@@ -16,18 +16,18 @@ namespace media {
TEST(WavAudioHandlerTest, SampleDataTest) {
WavAudioHandler handler(base::StringPiece(kTestAudioData,
arraysize(kTestAudioData)));
- const AudioParameters& params = handler.params();
- ASSERT_EQ(2, params.channels());
- ASSERT_EQ(16, params.bits_per_sample());
- ASSERT_EQ(48000, params.sample_rate());
- ASSERT_EQ(192000, params.GetBytesPerSecond());
+ ASSERT_EQ(2u, handler.num_channels());
+ ASSERT_EQ(16u, handler.bits_per_sample());
+ ASSERT_EQ(48000u, handler.sample_rate());
+ ASSERT_EQ(1u, handler.total_frames());
+ ASSERT_EQ(20u, handler.GetDuration().InMicroseconds());
ASSERT_EQ(4U, handler.data().size());
const char kData[] = "\x01\x00\x01\x00";
ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler.data());
scoped_ptr<AudioBus> bus = AudioBus::Create(
- params.channels(), handler.data().size() / params.channels());
+ handler.num_channels(), handler.data().size() / handler.num_channels());
size_t bytes_written;
ASSERT_TRUE(handler.CopyTo(bus.get(), 0, &bytes_written));
diff --git a/chromium/media/audio/virtual_audio_input_stream.cc b/chromium/media/audio/virtual_audio_input_stream.cc
index a961b53493b..e586ac88618 100644
--- a/chromium/media/audio/virtual_audio_input_stream.cc
+++ b/chromium/media/audio/virtual_audio_input_stream.cc
@@ -38,7 +38,7 @@ class LoopbackAudioConverter : public AudioConverter::InputCallback {
private:
double ProvideInput(AudioBus* audio_bus,
base::TimeDelta buffer_delay) override {
- audio_converter_.Convert(audio_bus);
+ audio_converter_.ConvertWithDelay(buffer_delay, audio_bus);
return 1.0;
}
@@ -58,7 +58,8 @@ VirtualAudioInputStream::VirtualAudioInputStream(
params_(params),
mixer_(params_, params_, false),
num_attached_output_streams_(0),
- fake_consumer_(worker_task_runner_, params_) {
+ fake_worker_(worker_task_runner_, params_),
+ audio_bus_(AudioBus::Create(params)) {
DCHECK(params_.IsValid());
DCHECK(worker_task_runner_.get());
@@ -89,13 +90,13 @@ bool VirtualAudioInputStream::Open() {
void VirtualAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(thread_checker_.CalledOnValidThread());
callback_ = callback;
- fake_consumer_.Start(base::Bind(
+ fake_worker_.Start(base::Bind(
&VirtualAudioInputStream::PumpAudio, base::Unretained(this)));
}
void VirtualAudioInputStream::Stop() {
DCHECK(thread_checker_.CalledOnValidThread());
- fake_consumer_.Stop();
+ fake_worker_.Stop();
callback_ = NULL;
}
@@ -132,14 +133,18 @@ void VirtualAudioInputStream::RemoveOutputStream(
DCHECK_LE(0, num_attached_output_streams_);
}
-void VirtualAudioInputStream::PumpAudio(AudioBus* audio_bus) {
+void VirtualAudioInputStream::PumpAudio() {
DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
base::AutoLock scoped_lock(converter_network_lock_);
- mixer_.Convert(audio_bus);
+ // Because the audio is being looped-back, the delay until it will be played
+ // out is zero.
+ mixer_.ConvertWithDelay(base::TimeDelta(), audio_bus_.get());
}
- callback_->OnData(this, audio_bus, params_.GetBytesPerBuffer(), 1.0);
+ // Because the audio is being looped-back, the delay since it was
+ // recorded is zero.
+ callback_->OnData(this, audio_bus_.get(), 0, 1.0);
}
void VirtualAudioInputStream::Close() {
@@ -167,7 +172,9 @@ double VirtualAudioInputStream::GetVolume() {
return 1.0;
}
-void VirtualAudioInputStream::SetAutomaticGainControl(bool enabled) {}
+bool VirtualAudioInputStream::SetAutomaticGainControl(bool enabled) {
+ return false;
+}
bool VirtualAudioInputStream::GetAutomaticGainControl() {
return false;
diff --git a/chromium/media/audio/virtual_audio_input_stream.h b/chromium/media/audio/virtual_audio_input_stream.h
index 66d1a46feec..d64ef359e96 100644
--- a/chromium/media/audio/virtual_audio_input_stream.h
+++ b/chromium/media/audio/virtual_audio_input_stream.h
@@ -14,7 +14,7 @@
#include "base/threading/thread_checker.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/fake_audio_worker.h"
#include "media/base/audio_converter.h"
namespace base {
@@ -54,7 +54,7 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
double GetMaxVolume() override;
void SetVolume(double volume) override;
double GetVolume() override;
- void SetAutomaticGainControl(bool enabled) override;
+ bool SetAutomaticGainControl(bool enabled) override;
bool GetAutomaticGainControl() override;
bool IsMuted() override;
@@ -77,7 +77,7 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
// Pulls audio data from all attached VirtualAudioOutputStreams, mixes and
// converts the streams into one, and pushes the result to |callback_|.
// Invoked on the worker thread.
- void PumpAudio(AudioBus* audio_bus);
+ void PumpAudio();
const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
@@ -105,7 +105,9 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
int num_attached_output_streams_;
// Handles callback timing for consumption of audio data.
- FakeAudioConsumer fake_consumer_;
+ FakeAudioWorker fake_worker_;
+
+ scoped_ptr<AudioBus> audio_bus_;
base::ThreadChecker thread_checker_;
diff --git a/chromium/media/audio/virtual_audio_input_stream_unittest.cc b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
index 3ff07b8f41c..c960f36c7ed 100644
--- a/chromium/media/audio/virtual_audio_input_stream_unittest.cc
+++ b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
@@ -116,12 +116,12 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
void Start() {
EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _)).Times(AtLeast(1));
- ASSERT_TRUE(!!stream_);
+ ASSERT_TRUE(stream_);
stream_->Start(&input_callback_);
}
void CreateAndStartOneOutputStream() {
- ASSERT_TRUE(!!stream_);
+ ASSERT_TRUE(stream_);
AudioOutputStream* const output_stream = new VirtualAudioOutputStream(
kParams,
stream_,
@@ -133,12 +133,12 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
}
void Stop() {
- ASSERT_TRUE(!!stream_);
+ ASSERT_TRUE(stream_);
stream_->Stop();
}
void Close() {
- ASSERT_TRUE(!!stream_);
+ ASSERT_TRUE(stream_);
stream_->Close();
stream_ = NULL;
closed_stream_.Signal();
@@ -163,7 +163,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
void StopAndCloseOneOutputStream() {
ASSERT_TRUE(!output_streams_.empty());
AudioOutputStream* const output_stream = output_streams_.front();
- ASSERT_TRUE(!!output_stream);
+ ASSERT_TRUE(output_stream);
output_streams_.pop_front();
output_stream->Stop();
@@ -173,7 +173,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
void StopFirstOutputStream() {
ASSERT_TRUE(!output_streams_.empty());
AudioOutputStream* const output_stream = output_streams_.front();
- ASSERT_TRUE(!!output_stream);
+ ASSERT_TRUE(output_stream);
output_streams_.pop_front();
output_stream->Stop();
stopped_output_streams_.push_back(output_stream);
diff --git a/chromium/media/audio/virtual_audio_output_stream.cc b/chromium/media/audio/virtual_audio_output_stream.cc
index 016c2f331b3..29e3804fe21 100644
--- a/chromium/media/audio/virtual_audio_output_stream.cc
+++ b/chromium/media/audio/virtual_audio_output_stream.cc
@@ -77,7 +77,12 @@ double VirtualAudioOutputStream::ProvideInput(AudioBus* audio_bus,
// platform.
DCHECK(callback_);
- const int frames = callback_->OnMoreData(audio_bus, 0);
+ DCHECK_GE(buffer_delay, base::TimeDelta());
+ const int64 upstream_delay_in_bytes =
+ params_.GetBytesPerSecond() * buffer_delay /
+ base::TimeDelta::FromSeconds(1);
+ const int frames = callback_->OnMoreData(
+ audio_bus, static_cast<uint32>(upstream_delay_in_bytes));
if (frames < audio_bus->frames())
audio_bus->ZeroFramesPartial(frames, audio_bus->frames() - frames);
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
index ee2b903583f..624c0ccd7ab 100644
--- a/chromium/media/audio/win/audio_device_listener_win.cc
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -34,7 +34,7 @@ static std::string GetDeviceId(EDataFlow flow,
ERole role) {
ScopedComPtr<IMMDevice> device =
CoreAudioUtil::CreateDefaultDevice(flow, role);
- if (!device) {
+ if (!device.get()) {
// Most probable reason for ending up here is that all audio devices are
// disabled or unplugged.
DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
@@ -42,7 +42,7 @@ static std::string GetDeviceId(EDataFlow flow,
}
AudioDeviceName device_name;
- HRESULT hr = CoreAudioUtil::GetDeviceName(device, &device_name);
+ HRESULT hr = CoreAudioUtil::GetDeviceName(device.get(), &device_name);
if (FAILED(hr)) {
DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
return std::string();
@@ -57,7 +57,7 @@ AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
ScopedComPtr<IMMDeviceEnumerator> device_enumerator(
CoreAudioUtil::CreateDeviceEnumerator());
- if (!device_enumerator)
+ if (!device_enumerator.get())
return;
HRESULT hr = device_enumerator->RegisterEndpointNotificationCallback(this);
@@ -79,7 +79,7 @@ AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
AudioDeviceListenerWin::~AudioDeviceListenerWin() {
DCHECK(thread_checker_.CalledOnValidThread());
- if (device_enumerator_) {
+ if (device_enumerator_.get()) {
HRESULT hr =
device_enumerator_->UnregisterEndpointNotificationCallback(this);
LOG_IF(ERROR, FAILED(hr)) << "UnregisterEndpointNotificationCallback() "
@@ -158,12 +158,12 @@ STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
if (new_default_device_id)
new_device_id = base::WideToUTF8(new_default_device_id);
- VLOG(1) << "OnDefaultDeviceChanged() "
- << "new_default_device: "
- << (new_default_device_id ?
- CoreAudioUtil::GetFriendlyName(new_device_id) : "No device")
- << ", flow: " << FlowToString(flow)
- << ", role: " << RoleToString(role);
+ DVLOG(1) << "OnDefaultDeviceChanged() "
+ << "new_default_device: "
+ << (new_default_device_id ?
+ CoreAudioUtil::GetFriendlyName(new_device_id) : "No device")
+ << ", flow: " << FlowToString(flow)
+ << ", role: " << RoleToString(role);
// Only fire a state change event if the device has actually changed.
// TODO(dalecurtis): This still seems to fire an extra event on my machine for
diff --git a/chromium/media/audio/win/audio_device_listener_win.h b/chromium/media/audio/win/audio_device_listener_win.h
index 92777a12a0d..9c2ac4824a6 100644
--- a/chromium/media/audio/win/audio_device_listener_win.h
+++ b/chromium/media/audio/win/audio_device_listener_win.h
@@ -36,15 +36,17 @@ class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
friend class AudioDeviceListenerWinTest;
// IMMNotificationClient implementation.
- STDMETHOD_(ULONG, AddRef)();
- STDMETHOD_(ULONG, Release)();
- STDMETHOD(QueryInterface)(REFIID iid, void** object);
- STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key);
- STDMETHOD(OnDeviceAdded)(LPCWSTR device_id);
- STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id);
- STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state);
- STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role,
- LPCWSTR new_default_device_id);
+ STDMETHOD_(ULONG, AddRef)() override;
+ STDMETHOD_(ULONG, Release)() override;
+ STDMETHOD(QueryInterface)(REFIID iid, void** object) override;
+ STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id,
+ const PROPERTYKEY key) override;
+ STDMETHOD(OnDeviceAdded)(LPCWSTR device_id) override;
+ STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id) override;
+ STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state) override;
+ STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow,
+ ERole role,
+ LPCWSTR new_default_device_id) override;
base::Closure listener_cb_;
ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
diff --git a/chromium/media/audio/win/audio_device_listener_win_unittest.cc b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
index 49a13592766..052b1bb8c39 100644
--- a/chromium/media/audio/win/audio_device_listener_win_unittest.cc
+++ b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
@@ -10,6 +10,7 @@
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/win/audio_device_listener_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -25,8 +26,8 @@ static const char kSecondTestDevice[] = "test_device_1";
class AudioDeviceListenerWinTest : public testing::Test {
public:
- AudioDeviceListenerWinTest()
- : com_init_(ScopedCOMInitializer::kMTA) {
+ AudioDeviceListenerWinTest() {
+ DCHECK(com_init_.succeeded());
}
virtual void SetUp() {
@@ -65,8 +66,7 @@ class AudioDeviceListenerWinTest : public testing::Test {
// Simulate a device change events and ensure we get the right callbacks.
TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
- if (!CoreAudioUtil::IsSupported())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(CoreAudioUtil::IsSupported());
SetOutputDeviceId(kNoDevice);
EXPECT_CALL(*this, OnDeviceChange()).Times(1);
@@ -84,8 +84,7 @@ TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
// Ensure that null output device changes don't crash. Simulates the situation
// where we have no output devices.
TEST_F(AudioDeviceListenerWinTest, NullOutputDeviceChange) {
- if (!CoreAudioUtil::IsSupported())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(CoreAudioUtil::IsSupported());
SetOutputDeviceId(kNoDevice);
EXPECT_CALL(*this, OnDeviceChange()).Times(0);
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index 8df8620b9f3..72d1d725f5b 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -168,7 +168,7 @@ void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
HRESULT hr = audio_client_->Start();
DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";
- if (SUCCEEDED(hr) && audio_render_client_for_loopback_)
+ if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get())
hr = audio_render_client_for_loopback_->Start();
started_ = SUCCEEDED(hr);
@@ -240,9 +240,10 @@ void WASAPIAudioInputStream::SetVolume(double volume) {
// Set a new master volume level. Valid volume levels are in the range
// 0.0 to 1.0. Ignore volume-change events.
- HRESULT hr = simple_audio_volume_->SetMasterVolume(static_cast<float>(volume),
- NULL);
- DLOG_IF(WARNING, FAILED(hr)) << "Failed to set new input master volume.";
+ HRESULT hr =
+ simple_audio_volume_->SetMasterVolume(static_cast<float>(volume), NULL);
+ if (FAILED(hr))
+ DLOG(WARNING) << "Failed to set new input master volume.";
// Update the AGC volume level based on the last setting above. Note that,
// the volume-level resolution is not infinite and it is therefore not
@@ -260,7 +261,8 @@ double WASAPIAudioInputStream::GetVolume() {
// Retrieve the current volume level. The value is in the range 0.0 to 1.0.
float level = 0.0f;
HRESULT hr = simple_audio_volume_->GetMasterVolume(&level);
- DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
+ if (FAILED(hr))
+ DLOG(WARNING) << "Failed to get input master volume.";
return static_cast<double>(level);
}
@@ -274,7 +276,8 @@ bool WASAPIAudioInputStream::IsMuted() {
// Retrieves the current muting state for the audio session.
BOOL is_muted = FALSE;
HRESULT hr = simple_audio_volume_->GetMute(&is_muted);
- DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
+ if (FAILED(hr))
+ DLOG(WARNING) << "Failed to get input master volume.";
return is_muted != FALSE;
}
@@ -333,8 +336,10 @@ HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
if (FAILED(hr))
return hr;
- *effects = IsDefaultCommunicationDevice(enumerator, endpoint_device) ?
- AudioParameters::DUCKING : AudioParameters::NO_EFFECTS;
+ *effects =
+ IsDefaultCommunicationDevice(enumerator.get(), endpoint_device.get())
+ ? AudioParameters::DUCKING
+ : AudioParameters::NO_EFFECTS;
ScopedComPtr<IAudioClient> audio_client;
hr = endpoint_device->Activate(__uuidof(IAudioClient),
@@ -348,7 +353,7 @@ void WASAPIAudioInputStream::Run() {
ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
// Increase the thread priority.
- capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
+ capture_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO);
// Enable MMCSS to ensure that this thread receives prioritized access to
// CPU resources.
@@ -504,7 +509,7 @@ void WASAPIAudioInputStream::HandleError(HRESULT err) {
}
HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
- DCHECK(!endpoint_device_);
+ DCHECK(!endpoint_device_.get());
ScopedComPtr<IMMDeviceEnumerator> enumerator;
HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
@@ -523,7 +528,8 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// to be valid matches.
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
endpoint_device_.Receive());
- if (endpoint_device_ && device_id_ != AudioManagerBase::kDefaultDeviceId) {
+ if (endpoint_device_.get() &&
+ device_id_ != AudioManagerBase::kDefaultDeviceId) {
base::win::ScopedCoMem<WCHAR> communications_id;
endpoint_device_->GetId(&communications_id);
if (device_id_ !=
@@ -540,7 +546,7 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
}
}
- if (!endpoint_device_) {
+ if (!endpoint_device_.get()) {
if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
// Retrieve the default capture audio endpoint for the specified role.
// Note that, in Windows Vista, the MMDevice API supports device roles
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index e933a447130..7501405b74d 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -92,17 +92,17 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
- virtual ~WASAPIAudioInputStream();
+ ~WASAPIAudioInputStream() override;
// Implementation of AudioInputStream.
- virtual bool Open() override;
- virtual void Start(AudioInputCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
- virtual double GetMaxVolume() override;
- virtual void SetVolume(double volume) override;
- virtual double GetVolume() override;
- virtual bool IsMuted() override;
+ bool Open() override;
+ void Start(AudioInputCallback* callback) override;
+ void Stop() override;
+ void Close() override;
+ double GetMaxVolume() override;
+ void SetVolume(double volume) override;
+ double GetVolume() override;
+ bool IsMuted() override;
bool started() const { return started_; }
@@ -111,7 +111,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
private:
// DelegateSimpleThread::Delegate implementation.
- virtual void Run() override;
+ void Run() override;
// Issues the OnError() callback to the |sink_|.
void HandleError(HRESULT err);
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 4a4939ed68b..8f6b3ce2daa 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -15,13 +15,13 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-using base::win::ScopedCOMInitializer;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
@@ -61,16 +61,16 @@ class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
data_event_.Wait();
}
- virtual void OnData(AudioInputStream* stream,
- const AudioBus* src,
- uint32 hardware_delay_bytes,
- double volume) override {
+ void OnData(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) override {
EXPECT_NE(hardware_delay_bytes, 0u);
num_received_audio_frames_ += src->frames();
data_event_.Signal();
}
- virtual void OnError(AudioInputStream* stream) override {
+ void OnError(AudioInputStream* stream) override {
error_ = true;
}
@@ -103,7 +103,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
VLOG(0) << "bits_per_sample_:" << bits_per_sample_;
}
- virtual ~WriteToFileAudioSink() {
+ ~WriteToFileAudioSink() override {
size_t bytes_written = 0;
while (bytes_written < bytes_to_write_) {
const uint8* chunk;
@@ -122,10 +122,10 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
}
// AudioInputStream::AudioInputCallback implementation.
- virtual void OnData(AudioInputStream* stream,
- const AudioBus* src,
- uint32 hardware_delay_bytes,
- double volume) {
+ void OnData(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) override {
EXPECT_EQ(bits_per_sample_, 16);
const int num_samples = src->frames() * src->channels();
scoped_ptr<int16> interleaved(new int16[num_samples]);
@@ -141,7 +141,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
}
}
- virtual void OnError(AudioInputStream* stream) {}
+ void OnError(AudioInputStream* stream) override {}
private:
int bits_per_sample_;
@@ -150,20 +150,11 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
size_t bytes_to_write_;
};
-// Convenience method which ensures that we are not running on the build
-// bots and that at least one valid input device can be found. We also
-// verify that we are not running on XP since the low-latency (WASAPI-
-// based) version requires Windows Vista or higher.
-static bool CanRunAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This tests requires Windows Vista or higher.";
- return false;
- }
+static bool HasCoreAudioAndInputDevices(AudioManager* audio_man) {
+ // The low-latency (WASAPI-based) version requires Windows Vista or higher.
// TODO(henrika): note that we use Wave today to query the number of
// existing input devices.
- bool input = audio_man->HasAudioInputDevices();
- LOG_IF(WARNING, !input) << "No input device detected.";
- return input;
+ return CoreAudioUtil::IsSupported() && audio_man->HasAudioInputDevices();
}
// Convenience method which creates a default AudioInputStream object but
@@ -171,8 +162,7 @@ static bool CanRunAudioTests(AudioManager* audio_man) {
class AudioInputStreamWrapper {
public:
explicit AudioInputStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager),
+ : audio_man_(audio_manager),
default_params_(
audio_manager->GetInputStreamParameters(
AudioManagerBase::kDefaultDeviceId)) {
@@ -215,7 +205,6 @@ class AudioInputStreamWrapper {
return ais;
}
- ScopedCOMInitializer com_init_;
AudioManager* audio_man_;
const AudioParameters default_params_;
int frames_per_buffer_;
@@ -266,10 +255,7 @@ class ScopedAudioInputStream {
// for all available input devices.
TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
// Retrieve a list of all available input devices.
media::AudioDeviceNames device_names;
@@ -288,8 +274,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
// Test Create(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager.get()));
ais.Close();
@@ -298,8 +283,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
// Test Open(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
@@ -309,8 +293,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
// Test Open(), Start(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
@@ -322,8 +305,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
// Test Open(), Start(), Stop(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
@@ -336,8 +318,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
// Test some additional calling sequences.
TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
ScopedAudioInputStream ais(
CreateDefaultAudioInputStream(audio_manager.get()));
WASAPIAudioInputStream* wais =
@@ -365,8 +346,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
int count = 0;
base::MessageLoopForUI loop;
@@ -436,8 +416,8 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// Test that we can capture a stream in loopback.
TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!audio_manager->HasAudioOutputDevices() || !CoreAudioUtil::IsSupported())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(audio_manager->HasAudioOutputDevices() &&
+ CoreAudioUtil::IsSupported());
AudioParameters params = audio_manager->GetInputStreamParameters(
AudioManagerBase::kLoopbackInputDeviceId);
@@ -470,8 +450,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
// environment variable to a value greater than 0.
TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager.get()));
// Name of the output PCM file containing captured data. The output file
// will be stored in the directory containing 'media_unittests.exe'.
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index 8c9ff2f64e5..222f0cfa583 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -7,11 +7,10 @@
#include <Functiondiscoverykeys_devpkey.h>
#include "base/command_line.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/trace_event.h"
#include "base/win/scoped_propvariant.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
@@ -27,7 +26,7 @@ namespace media {
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
return AUDCLNT_SHAREMODE_EXCLUSIVE;
return AUDCLNT_SHAREMODE_SHARED;
@@ -41,12 +40,13 @@ int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
- if (!device)
+ if (!device.get())
return 0;
- client = CoreAudioUtil::CreateClient(device);
+ client = CoreAudioUtil::CreateClient(device.get());
}
- if (!client || FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
+ if (!client.get() ||
+ FAILED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)))
return 0;
return static_cast<int>(format.Format.nSamplesPerSec);
@@ -72,9 +72,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
- VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
- VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
- << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+ DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
+ DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+ << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
bool avrt_init = avrt::Initialize();
@@ -103,10 +103,10 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
// get from the audio endpoint device in each render event.
packet_size_frames_ = params.frames_per_buffer();
packet_size_bytes_ = params.GetBytesPerBuffer();
- VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
- VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
- VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
- VLOG(1) << "Number of milliseconds per packet: "
+ DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
+ DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
+ DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
+ DVLOG(1) << "Number of milliseconds per packet: "
<< params.GetBufferDuration().InMillisecondsF();
// All events are auto-reset events and non-signaled initially.
@@ -126,13 +126,13 @@ WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
}
bool WASAPIAudioOutputStream::Open() {
- VLOG(1) << "WASAPIAudioOutputStream::Open()";
+ DVLOG(1) << "WASAPIAudioOutputStream::Open()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
if (opened_)
return true;
- DCHECK(!audio_client_);
- DCHECK(!audio_render_client_);
+ DCHECK(!audio_client_.get());
+ DCHECK(!audio_render_client_.get());
// Will be set to true if we ended up opening the default communications
// device.
@@ -146,17 +146,16 @@ bool WASAPIAudioOutputStream::Open() {
communications_device = (device_role_ == eCommunications);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
- DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
- if (device)
- audio_client = CoreAudioUtil::CreateClient(device);
+ DLOG_IF(ERROR, !device.get()) << "Failed to open device: " << device_id_;
+ if (device.get())
+ audio_client = CoreAudioUtil::CreateClient(device.get());
}
- if (!audio_client)
+ if (!audio_client.get())
return false;
// Extra sanity to ensure that the provided device format is still valid.
- if (!CoreAudioUtil::IsFormatSupported(audio_client,
- share_mode_,
+ if (!CoreAudioUtil::IsFormatSupported(audio_client.get(), share_mode_,
&format_)) {
LOG(ERROR) << "Audio parameters are not supported.";
return false;
@@ -167,7 +166,7 @@ bool WASAPIAudioOutputStream::Open() {
// Initialize the audio stream between the client and the device in shared
// mode and using event-driven buffer handling.
hr = CoreAudioUtil::SharedModeInitialize(
- audio_client, &format_, audio_samples_render_event_.Get(),
+ audio_client.get(), &format_, audio_samples_render_event_.Get(),
&endpoint_buffer_size_frames_,
communications_device ? &kCommunicationsSessionId : NULL);
if (FAILED(hr))
@@ -187,7 +186,7 @@ bool WASAPIAudioOutputStream::Open() {
} else {
// TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
// when removing the enable-exclusive-audio flag.
- hr = ExclusiveModeInitialization(audio_client,
+ hr = ExclusiveModeInitialization(audio_client.get(),
audio_samples_render_event_.Get(),
&endpoint_buffer_size_frames_);
if (FAILED(hr))
@@ -206,8 +205,8 @@ bool WASAPIAudioOutputStream::Open() {
// The IAudioRenderClient interface enables us to write output data to
// a rendering endpoint buffer.
ScopedComPtr<IAudioRenderClient> audio_render_client =
- CoreAudioUtil::CreateRenderClient(audio_client);
- if (!audio_render_client)
+ CoreAudioUtil::CreateRenderClient(audio_client.get());
+ if (!audio_render_client.get())
return false;
// Store valid COM interfaces.
@@ -226,7 +225,7 @@ bool WASAPIAudioOutputStream::Open() {
}
void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
- VLOG(1) << "WASAPIAudioOutputStream::Start()";
+ DVLOG(1) << "WASAPIAudioOutputStream::Start()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
CHECK(callback);
CHECK(opened_);
@@ -241,7 +240,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
// Ensure that the endpoint buffer is prepared with silence.
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_client_, audio_render_client_)) {
+ audio_client_.get(), audio_render_client_.get())) {
LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
callback->OnError(this);
return;
@@ -271,7 +270,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
}
void WASAPIAudioOutputStream::Stop() {
- VLOG(1) << "WASAPIAudioOutputStream::Stop()";
+ DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
if (!render_thread_)
return;
@@ -306,7 +305,7 @@ void WASAPIAudioOutputStream::Stop() {
}
void WASAPIAudioOutputStream::Close() {
- VLOG(1) << "WASAPIAudioOutputStream::Close()";
+ DVLOG(1) << "WASAPIAudioOutputStream::Close()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
// It is valid to call Close() before calling open or Start().
@@ -319,7 +318,7 @@ void WASAPIAudioOutputStream::Close() {
}
void WASAPIAudioOutputStream::SetVolume(double volume) {
- VLOG(1) << "SetVolume(volume=" << volume << ")";
+ DVLOG(1) << "SetVolume(volume=" << volume << ")";
float volume_float = static_cast<float>(volume);
if (volume_float < 0.0f || volume_float > 1.0f) {
return;
@@ -328,7 +327,7 @@ void WASAPIAudioOutputStream::SetVolume(double volume) {
}
void WASAPIAudioOutputStream::GetVolume(double* volume) {
- VLOG(1) << "GetVolume()";
+ DVLOG(1) << "GetVolume()";
*volume = static_cast<double>(volume_);
}
@@ -336,7 +335,7 @@ void WASAPIAudioOutputStream::Run() {
ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
// Increase the thread priority.
- render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
+ render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO);
// Enable MMCSS to ensure that this thread receives prioritized access to
// CPU resources.
@@ -538,7 +537,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
event_handle != INVALID_HANDLE_VALUE);
if (use_event)
stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
- VLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
+ DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
// Initialize the audio stream between the client and the device.
// For an exclusive-mode stream that uses event-driven buffering, the
@@ -561,7 +560,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
UINT32 aligned_buffer_size = 0;
client->GetBufferSize(&aligned_buffer_size);
- VLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
+ DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
// Calculate new aligned periodicity. Each unit of reference time
// is 100 nanoseconds.
@@ -573,9 +572,9 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
// at this stage but we bail out with an error code instead and
// combine it with a log message which informs about the suggested
// aligned buffer size which should be used instead.
- VLOG(1) << "aligned_buffer_duration: "
- << static_cast<double>(aligned_buffer_duration / 10000.0)
- << " [ms]";
+ DVLOG(1) << "aligned_buffer_duration: "
+ << static_cast<double>(aligned_buffer_duration / 10000.0)
+ << " [ms]";
} else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
// We will get this error if we try to use a smaller buffer size than
// the minimum supported size (usually ~3ms on Windows 7).
@@ -587,7 +586,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
if (use_event) {
hr = client->SetEventHandle(event_handle);
if (FAILED(hr)) {
- VLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
+ DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
return hr;
}
}
@@ -595,12 +594,12 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
UINT32 buffer_size_in_frames = 0;
hr = client->GetBufferSize(&buffer_size_in_frames);
if (FAILED(hr)) {
- VLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+ DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
return hr;
}
*endpoint_buffer_size = buffer_size_in_frames;
- VLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
+ DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
return hr;
}
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 1584a4603df..9d612b33b25 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -128,15 +128,15 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioOutputStream::Close().
- virtual ~WASAPIAudioOutputStream();
+ ~WASAPIAudioOutputStream() override;
// Implementation of AudioOutputStream.
- virtual bool Open() override;
- virtual void Start(AudioSourceCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
- virtual void SetVolume(double volume) override;
- virtual void GetVolume(double* volume) override;
+ bool Open() override;
+ void Start(AudioSourceCallback* callback) override;
+ void Stop() override;
+ void Close() override;
+ void SetVolume(double volume) override;
+ void GetVolume(double* volume) override;
// Retrieves the sample rate the audio engine uses for its internal
// processing/mixing of shared-mode streams. To fetch the settings for the
@@ -151,7 +151,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
private:
// DelegateSimpleThread::Delegate implementation.
- virtual void Run() override;
+ void Run() override;
// Core part of the thread loop which controls the actual rendering.
// Checks available amount of space in the endpoint buffer and reads
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index bab2a278a06..85516de0cbd 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -16,6 +16,7 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -36,7 +37,6 @@ using ::testing::Gt;
using ::testing::InvokeWithoutArgs;
using ::testing::NotNull;
using ::testing::Return;
-using base::win::ScopedCOMInitializer;
namespace media {
@@ -80,7 +80,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
delta_times_.reset(new int[kMaxDeltaSamples]);
}
- virtual ~ReadFromFileAudioSource() {
+ ~ReadFromFileAudioSource() override {
// Get complete file path to output file in directory containing
// media_unittests.exe.
base::FilePath file_name;
@@ -102,8 +102,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
}
// AudioOutputStream::AudioSourceCallback implementation.
- virtual int OnMoreData(AudioBus* audio_bus,
- uint32 total_bytes_delay) {
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override {
// Store time difference between two successive callbacks in an array.
// These values will be written to a file in the destructor.
const base::TimeTicks now_time = base::TimeTicks::Now();
@@ -130,7 +129,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual void OnError(AudioOutputStream* stream) {}
+ void OnError(AudioOutputStream* stream) override {}
int file_size() { return file_->data_size(); }
@@ -148,24 +147,11 @@ static bool ExclusiveModeIsEnabled() {
AUDCLNT_SHAREMODE_EXCLUSIVE);
}
-// Convenience method which ensures that we are not running on the build
-// bots and that at least one valid output device can be found. We also
-// verify that we are not running on XP since the low-latency (WASAPI-
-// based) version requires Windows Vista or higher.
-static bool CanRunAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This test requires Windows Vista or higher.";
- return false;
- }
-
+static bool HasCoreAudioAndOutputDevices(AudioManager* audio_man) {
+ // The low-latency (WASAPI-based) version requires Windows Vista or higher.
// TODO(henrika): note that we use Wave today to query the number of
// existing output devices.
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output devices detected.";
- return false;
- }
-
- return true;
+ return CoreAudioUtil::IsSupported() && audio_man->HasAudioOutputDevices();
}
// Convenience method which creates a default AudioOutputStream object but
@@ -244,9 +230,10 @@ static AudioOutputStream* CreateDefaultAudioOutputStream(
TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Skip this test in exclusive mode since the resulting rate is only utilized
// for shared mode streams.
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()) || ExclusiveModeIsEnabled())
+ if (ExclusiveModeIsEnabled())
return;
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
// Default device intended for games, system notification sounds,
// and voice commands.
@@ -258,8 +245,7 @@ TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Test Create(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
aos->Close();
}
@@ -267,8 +253,7 @@ TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
// Test Open(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
EXPECT_TRUE(aos->Open());
aos->Close();
@@ -277,8 +262,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
// Test Open(), Start(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
EXPECT_TRUE(aos->Open());
MockAudioSourceCallback source;
@@ -291,8 +275,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
// Test Open(), Start(), Stop(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
EXPECT_TRUE(aos->Open());
MockAudioSourceCallback source;
@@ -306,8 +289,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
// Test SetVolume(), GetVolume()
TEST(WASAPIAudioOutputStreamTest, Volume) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
// Initial volume should be full volume (1.0).
@@ -343,8 +325,7 @@ TEST(WASAPIAudioOutputStreamTest, Volume) {
// Test some additional calling sequences.
TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
WASAPIAudioOutputStream* waos = static_cast<WASAPIAudioOutputStream*>(aos);
@@ -383,8 +364,7 @@ TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
// Use preferred packet size and verify that rendering starts.
TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
base::MessageLoopForUI loop;
MockAudioSourceCallback source;
@@ -422,8 +402,7 @@ TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
// The test files are approximately 20 seconds long.
TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()));
AudioOutputStreamWrapper aosw(audio_manager.get());
AudioOutputStream* aos = aosw.Create();
@@ -443,13 +422,13 @@ TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
}
ReadFromFileAudioSource file_source(file_name);
- VLOG(0) << "File name : " << file_name.c_str();
- VLOG(0) << "Sample rate : " << aosw.sample_rate();
- VLOG(0) << "Bits per sample: " << aosw.bits_per_sample();
- VLOG(0) << "#channels : " << aosw.channels();
- VLOG(0) << "File size : " << file_source.file_size();
- VLOG(0) << "#file segments : " << kNumFileSegments;
- VLOG(0) << ">> Listen to the stereo file while playing...";
+ DVLOG(0) << "File name : " << file_name.c_str();
+ DVLOG(0) << "Sample rate : " << aosw.sample_rate();
+ DVLOG(0) << "Bits per sample: " << aosw.bits_per_sample();
+ DVLOG(0) << "#channels : " << aosw.channels();
+ DVLOG(0) << "File size : " << file_source.file_size();
+ DVLOG(0) << "#file segments : " << kNumFileSegments;
+ DVLOG(0) << ">> Listen to the stereo file while playing...";
for (int i = 0; i < kNumFileSegments; i++) {
// Each segment will start with a short (~20ms) block of zeros, hence
@@ -462,7 +441,7 @@ TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
aos->Stop();
}
- VLOG(0) << ">> Stereo file playout has stopped.";
+ DVLOG(0) << ">> Stereo file playout has stopped.";
aos->Close();
}
@@ -470,13 +449,11 @@ TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
// certain set of audio parameters and a sample rate of 48kHz.
// The expected outcomes of each setting in this test has been derived
// manually using log outputs (--v=1).
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt48kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
+// It's disabled by default because a flag is required to enable exclusive mode.
+TEST(WASAPIAudioOutputStreamTest, DISABLED_ExclusiveModeBufferSizesAt48kHz) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()) &&
+ ExclusiveModeIsEnabled());
AudioOutputStreamWrapper aosw(audio_manager.get());
@@ -521,13 +498,11 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt48kHz) {
// certain set of audio parameters and a sample rate of 44.1kHz.
// The expected outcomes of each setting in this test has been derived
// manually using log outputs (--v=1).
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt44kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
+// It's disabled by default because a flag is required to enable exclusive mode.
+TEST(WASAPIAudioOutputStreamTest, DISABLED_ExclusiveModeBufferSizesAt44kHz) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()) &&
+ ExclusiveModeIsEnabled());
AudioOutputStreamWrapper aosw(audio_manager.get());
@@ -579,13 +554,11 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt44kHz) {
// Verify that we can open and start the output stream in exclusive mode at
// the lowest possible delay at 48kHz.
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt48kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
+// It's disabled by default because a flag is required to enable exclusive mode.
+TEST(WASAPIAudioOutputStreamTest, DISABLED_ExclusiveModeMinBufferSizeAt48kHz) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
+ ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager.get()) &&
+ ExclusiveModeIsEnabled());
base::MessageLoopForUI loop;
MockAudioSourceCallback source;
@@ -617,13 +590,10 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt48kHz) {
// Verify that we can open and start the output stream in exclusive mode at
// the lowest possible delay at 44.1kHz.
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt44kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
+// It's disabled by default because a flag is required to enable exclusive mode.
+TEST(WASAPIAudioOutputStreamTest, DISABLED_ExclusiveModeMinBufferSizeAt44kHz) {
+ ABORT_AUDIO_TEST_IF_NOT(ExclusiveModeIsEnabled());
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
base::MessageLoopForUI loop;
MockAudioSourceCallback source;
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index b4e5e77d67a..c09fb46284d 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -16,6 +16,7 @@
#include "base/files/file_path.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
#include "base/path_service.h"
#include "base/process/launch.h"
#include "base/strings/string_number_conversions.h"
@@ -111,8 +112,9 @@ static base::string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
static int NumberOfWaveOutBuffers() {
// Use the user provided buffer count if provided.
int buffers = 0;
- std::string buffers_str(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kWaveOutBuffers));
+ std::string buffers_str(
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kWaveOutBuffers));
if (base::StringToInt(buffers_str, &buffers) && buffers > 0) {
return buffers;
}
@@ -257,9 +259,9 @@ void AudioManagerWin::ShowAudioInputSettings() {
base::FilePath path;
PathService::Get(base::DIR_SYSTEM, &path);
path = path.Append(program);
- CommandLine command_line(path);
+ base::CommandLine command_line(path);
command_line.AppendArg(argument);
- base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+ base::LaunchProcess(command_line, base::LaunchOptions());
}
void AudioManagerWin::GetAudioDeviceNamesImpl(
@@ -393,6 +395,7 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
DVLOG(1) << "MakeLowLatencyInputStream: " << device_id;
AudioInputStream* stream = NULL;
+ UMA_HISTOGRAM_BOOLEAN("Media.WindowsCoreAudioInput", core_audio_supported());
if (!core_audio_supported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
@@ -416,7 +419,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
DLOG_IF(ERROR, !core_audio_supported() && !output_device_id.empty())
<< "CoreAudio is required to open non-default devices.";
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = 48000;
int buffer_size = kFallbackBufferSize;
@@ -480,8 +483,8 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
// Open up using the same channel layout as the source if it is
// supported by the hardware.
channel_layout = input_params.channel_layout();
- VLOG(1) << "Hardware channel layout is not used; using same layout"
- << " as the source instead (" << channel_layout << ")";
+ DVLOG(1) << "Hardware channel layout is not used; using same layout"
+ << " as the source instead (" << channel_layout << ")";
}
}
}
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
index ce61eb6cdc9..98265664176 100644
--- a/chromium/media/audio/win/audio_manager_win.h
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -21,35 +21,35 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
AudioManagerWin(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() override;
- virtual bool HasAudioInputDevices() override;
- virtual base::string16 GetAudioInputDeviceModel() override;
- virtual void ShowAudioInputSettings() override;
- virtual void GetAudioInputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual void GetAudioOutputDeviceNames(
- AudioDeviceNames* device_names) override;
- virtual AudioParameters GetInputStreamParameters(
+ bool HasAudioOutputDevices() override;
+ bool HasAudioInputDevices() override;
+ base::string16 GetAudioInputDeviceModel() override;
+ void ShowAudioInputSettings() override;
+ void GetAudioInputDeviceNames(AudioDeviceNames* device_names) override;
+ void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) override;
+ AudioParameters GetInputStreamParameters(
const std::string& device_id) override;
- virtual std::string GetAssociatedOutputDeviceID(
+ std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) override;
// Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
+ AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) override;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
const std::string& device_id) override;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) override;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) override;
- virtual std::string GetDefaultOutputDeviceID() override;
+ AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) override;
+ AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) override;
+ std::string GetDefaultOutputDeviceID() override;
protected:
- virtual ~AudioManagerWin();
+ ~AudioManagerWin() override;
- virtual AudioParameters GetPreferredOutputStreamParameters(
+ AudioParameters GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) override;
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
index 00e67507cee..3ef817b5eb5 100644
--- a/chromium/media/audio/win/audio_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -14,6 +14,7 @@
#include "media/base/limits.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/simple_sources.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -29,8 +30,6 @@ using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
-using base::win::ScopedCOMInitializer;
-
namespace media {
static const wchar_t kAudioFile1_16b_m_16K[]
@@ -50,17 +49,14 @@ class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
had_error_(0) {
}
// AudioSourceCallback::OnMoreData implementation:
- virtual int OnMoreData(AudioBus* audio_bus,
- uint32 total_bytes_delay) {
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override {
++callback_count_;
// Touch the channel memory value to make sure memory is good.
audio_bus->Zero();
return audio_bus->frames();
}
// AudioSourceCallback::OnError implementation:
- virtual void OnError(AudioOutputStream* stream) {
- ++had_error_;
- }
+ void OnError(AudioOutputStream* stream) override { ++had_error_; }
// Returns how many times OnMoreData() has been called.
int callback_count() const {
return callback_count_;
@@ -87,8 +83,7 @@ class TestSourceLaggy : public TestSourceBasic {
TestSourceLaggy(int laggy_after_buffer, int lag_in_ms)
: laggy_after_buffer_(laggy_after_buffer), lag_in_ms_(lag_in_ms) {
}
- virtual int OnMoreData(AudioBus* audio_bus,
- uint32 total_bytes_delay) {
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override {
// Call the base, which increments the callback_count_.
TestSourceBasic::OnMoreData(audio_bus, total_bytes_delay);
if (callback_count() > kMaxNumBuffers) {
@@ -160,10 +155,7 @@ class ReadOnlyMappedFile {
// Test that can it be created and closed.
TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
@@ -176,10 +168,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
// Test that can it be cannot be created with invalid parameters.
TEST(WinAudioTest, SanityOnMakeParams) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
@@ -212,10 +201,7 @@ TEST(WinAudioTest, SanityOnMakeParams) {
// Test that it can be opened and closed.
TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
@@ -229,10 +215,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
// Test that it has a maximum packet size.
TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
@@ -248,10 +231,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
// the test completes in reasonable time.
TEST(WinAudioTest, PCMWaveSlowSource) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
@@ -276,10 +256,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
// bug 19276 for more details.
TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
@@ -335,10 +312,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
// sound with a lower volume than PCMWaveStreamPlay200HzTone44Kss.
TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 20;
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
@@ -370,10 +344,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
// object roughly at the same time.
TEST(WinAudioTest, PushSourceFile16KHz) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
static const int kSampleRate = 16000;
SineWaveAudioSource source(1, 200.0, kSampleRate);
@@ -413,10 +384,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
// of silence.
TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
@@ -450,13 +418,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
// smaller buffer size for WASAPI than for Wave.
TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- // The WASAPI API requires a correct COM environment.
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
// Use 10 ms buffer size for WASAPI and 50 ms buffer size for Wave.
// Take the existing native sample rate into account.
@@ -494,10 +456,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
// Check that the pending bytes value is correct what the stream starts.
TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
@@ -556,11 +515,10 @@ class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
base::AlignedAlloc(data_size_, AudioBus::kChannelAlignment)));
audio_bus_ = AudioBus::WrapMemory(params, data_.get());
}
- ~SyncSocketSource() {}
+ ~SyncSocketSource() override {}
// AudioSourceCallback::OnMoreData implementation:
- virtual int OnMoreData(AudioBus* audio_bus,
- uint32 total_bytes_delay) {
+ int OnMoreData(AudioBus* audio_bus, uint32 total_bytes_delay) override {
socket_->Send(&total_bytes_delay, sizeof(total_bytes_delay));
uint32 size = socket_->Receive(data_.get(), data_size_);
DCHECK_EQ(static_cast<size_t>(size) % sizeof(*audio_bus_->channel(0)), 0U);
@@ -569,8 +527,7 @@ class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
}
// AudioSourceCallback::OnError implementation:
- virtual void OnError(AudioOutputStream* stream) {
- }
+ void OnError(AudioOutputStream* stream) override {}
private:
base::SyncSocket* socket_;
@@ -630,10 +587,7 @@ DWORD __stdcall SyncSocketThread(void* context) {
// In this test you should hear a continuous 200Hz tone for 2 seconds.
TEST(WinAudioTest, SyncSocketBasic) {
scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
+ ABORT_AUDIO_TEST_IF_NOT(audio_man->HasAudioOutputDevices());
static const int sample_rate = AudioParameters::kAudioCDSampleRate;
static const uint32 kSamples20ms = sample_rate / 50;
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index bf7967f8298..8e1a82bac2d 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -4,7 +4,6 @@
#include "media/audio/win/core_audio_util_win.h"
-#include <audioclient.h>
#include <devicetopology.h>
#include <functiondiscoverykeys_devpkey.h>
@@ -136,18 +135,6 @@ static bool LoadAudiosesDll() {
return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}
-static bool CanCreateDeviceEnumerator() {
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
- NULL, CLSCTX_INPROC_SERVER);
-
- // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
- // must be called at least once for each thread that uses the COM library.
- CHECK_NE(hr, CO_E_NOTINITIALIZED);
-
- return SUCCEEDED(hr);
-}
-
static std::string GetDeviceID(IMMDevice* device) {
ScopedCoMem<WCHAR> device_id_com;
std::string device_id;
@@ -156,9 +143,53 @@ static std::string GetDeviceID(IMMDevice* device) {
return device_id;
}
-bool CoreAudioUtil::IsSupported() {
+static HRESULT GetDeviceFriendlyNameInternal(IMMDevice* device,
+ std::string* friendly_name) {
+ // Retrieve user-friendly name of endpoint device.
+ // Example: "Microphone (Realtek High Definition Audio)".
+ ScopedComPtr<IPropertyStore> properties;
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
+ if (FAILED(hr))
+ return hr;
+
+ base::win::ScopedPropVariant friendly_name_pv;
+ hr = properties->GetValue(PKEY_Device_FriendlyName,
+ friendly_name_pv.Receive());
+ if (FAILED(hr))
+ return hr;
+
+ if (friendly_name_pv.get().vt == VT_LPWSTR &&
+ friendly_name_pv.get().pwszVal) {
+ base::WideToUTF8(friendly_name_pv.get().pwszVal,
+ wcslen(friendly_name_pv.get().pwszVal), friendly_name);
+ }
+
+ return hr;
+}
+
+static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
+ bool allow_reinitialize) {
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
+ HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ if (hr == CO_E_NOTINITIALIZED && allow_reinitialize) {
+ LOG(ERROR) << "CoCreateInstance fails with CO_E_NOTINITIALIZED";
+ // We have seen crashes which indicates that this method can in fact
+ // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+ // modules. Calling CoInitializeEx is an attempt to resolve the reported
+ // issues. See http://crbug.com/378465 for details.
+ hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
+ if (SUCCEEDED(hr)) {
+ hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ }
+ }
+ return device_enumerator;
+}
+
+static bool IsSupportedInternal() {
// It is possible to force usage of WaveXxx APIs by using a command line flag.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
DVLOG(1) << "Forcing usage of Windows WaveXxx APIs";
return false;
@@ -176,19 +207,28 @@ bool CoreAudioUtil::IsSupported() {
// the Audioses DLL since it depends on Mmdevapi.dll.
// See http://crbug.com/166397 why this extra step is required to guarantee
// Core Audio support.
- static bool g_audioses_dll_available = LoadAudiosesDll();
- if (!g_audioses_dll_available)
+ if (!LoadAudiosesDll())
return false;
// Being able to load the Audioses.dll does not seem to be sufficient for
// all devices to guarantee Core Audio support. To be 100%, we also verify
// that it is possible to a create the IMMDeviceEnumerator interface. If this
// works as well we should be home free.
- static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
- LOG_IF(ERROR, !g_can_create_device_enumerator)
- << "Failed to create Core Audio device enumerator on thread with ID "
- << GetCurrentThreadId();
- return g_can_create_device_enumerator;
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumeratorInternal(false);
+ if (!device_enumerator) {
+ LOG(ERROR)
+ << "Failed to create Core Audio device enumerator on thread with ID "
+ << GetCurrentThreadId();
+ return false;
+ }
+
+ return true;
+}
+
+bool CoreAudioUtil::IsSupported() {
+ static bool g_is_supported = IsSupportedInternal();
+ return g_is_supported;
}
base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
@@ -197,7 +237,7 @@ base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
}
AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
return AUDCLNT_SHAREMODE_EXCLUSIVE;
return AUDCLNT_SHAREMODE_SHARED;
@@ -208,7 +248,7 @@ int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
// Create the IMMDeviceEnumerator interface.
ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
CreateDeviceEnumerator();
- if (!device_enumerator)
+ if (!device_enumerator.get())
return 0;
// Generate a collection of active (present and not disabled) audio endpoint
@@ -233,22 +273,9 @@ int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
DCHECK(IsSupported());
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
- NULL, CLSCTX_INPROC_SERVER);
- if (hr == CO_E_NOTINITIALIZED) {
- LOG(ERROR) << "CoCreateInstance fails with CO_E_NOTINITIALIZED";
- // We have seen crashes which indicates that this method can in fact
- // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
- // modules. Calling CoInitializeEx is an attempt to resolve the reported
- // issues. See http://crbug.com/378465 for details.
- hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
- if (SUCCEEDED(hr)) {
- hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
- NULL, CLSCTX_INPROC_SERVER);
- }
- }
- CHECK(SUCCEEDED(hr));
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumeratorInternal(true);
+ CHECK(device_enumerator);
return device_enumerator;
}
@@ -260,7 +287,7 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
// Create the IMMDeviceEnumerator interface.
ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
CreateDeviceEnumerator();
- if (!device_enumerator)
+ if (!device_enumerator.get())
return endpoint_device;
// Retrieve the default audio endpoint for the specified data-flow
@@ -290,7 +317,7 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
- return device ? GetDeviceID(device) : std::string();
+ return device.get() ? GetDeviceID(device.get()) : std::string();
}
ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
@@ -301,7 +328,7 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
// Create the IMMDeviceEnumerator interface.
ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
CreateDeviceEnumerator();
- if (!device_enumerator)
+ if (!device_enumerator.get())
return endpoint_device;
// Retrieve an audio device specified by an endpoint device-identification
@@ -323,21 +350,9 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
if (device_name.unique_id.empty())
return E_FAIL;
- // Retrieve user-friendly name of endpoint device.
- // Example: "Microphone (Realtek High Definition Audio)".
- ScopedComPtr<IPropertyStore> properties;
- HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
- if (FAILED(hr))
- return hr;
- base::win::ScopedPropVariant friendly_name;
- hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
+ HRESULT hr = GetDeviceFriendlyNameInternal(device, &device_name.device_name);
if (FAILED(hr))
return hr;
- if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
- base::WideToUTF8(friendly_name.get().pwszVal,
- wcslen(friendly_name.get().pwszVal),
- &device_name.device_name);
- }
*name = device_name;
DVLOG(2) << "friendly name: " << device_name.device_name;
@@ -395,12 +410,13 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
std::string CoreAudioUtil::GetMatchingOutputDeviceID(
const std::string& input_device_id) {
ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
- if (!input_device)
+ if (!input_device.get())
return std::string();
// See if we can get id of the associated controller.
ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
- std::string controller_id(GetAudioControllerID(input_device, enumerator));
+ std::string controller_id(
+ GetAudioControllerID(input_device.get(), enumerator.get()));
if (controller_id.empty())
return std::string();
@@ -409,7 +425,7 @@ std::string CoreAudioUtil::GetMatchingOutputDeviceID(
ScopedComPtr<IMMDeviceCollection> collection;
enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
collection.Receive());
- if (!collection)
+ if (!collection.get())
return std::string();
UINT count = 0;
@@ -417,24 +433,24 @@ std::string CoreAudioUtil::GetMatchingOutputDeviceID(
ScopedComPtr<IMMDevice> output_device;
for (UINT i = 0; i < count; ++i) {
collection->Item(i, output_device.Receive());
- std::string output_controller_id(GetAudioControllerID(
- output_device, enumerator));
+ std::string output_controller_id(
+ GetAudioControllerID(output_device.get(), enumerator.get()));
if (output_controller_id == controller_id)
break;
output_device = NULL;
}
- return output_device ? GetDeviceID(output_device) : std::string();
+ return output_device.get() ? GetDeviceID(output_device.get()) : std::string();
}
std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
- if (!audio_device)
+ if (!audio_device.get())
return std::string();
AudioDeviceName device_name;
- HRESULT hr = GetDeviceName(audio_device, &device_name);
+ HRESULT hr = GetDeviceName(audio_device.get(), &device_name);
if (FAILED(hr))
return std::string();
@@ -446,10 +462,10 @@ bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
const std::string& device_id) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
- if (!device)
+ if (!device.get())
return false;
- std::string str_default(GetDeviceID(device));
+ std::string str_default(GetDeviceID(device.get()));
return device_id.compare(str_default) == 0;
}
@@ -490,8 +506,8 @@ ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
EDataFlow data_flow, ERole role) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
- return (default_device ? CreateClient(default_device) :
- ScopedComPtr<IAudioClient>());
+ return (default_device.get() ? CreateClient(default_device.get())
+ : ScopedComPtr<IAudioClient>());
}
ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
@@ -500,10 +516,10 @@ ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
return CreateDefaultClient(data_flow, role);
ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
- if (!device)
+ if (!device.get())
return ScopedComPtr<IAudioClient>();
- return CreateClient(device);
+ return CreateClient(device.get());
}
HRESULT CoreAudioUtil::GetSharedModeMixFormat(
@@ -554,11 +570,11 @@ bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
// First, get the preferred mixing format for shared mode streams.
ScopedComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
- if (!client)
+ if (!client.get())
return false;
WAVEFORMATPCMEX format;
- HRESULT hr = GetSharedModeMixFormat(client, &format);
+ HRESULT hr = GetSharedModeMixFormat(client.get(), &format);
if (FAILED(hr))
return false;
@@ -591,8 +607,8 @@ bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
// an even wider range of shared-mode formats where the installation package
// for the audio device includes a local effects (LFX) audio processing
// object (APO) that can handle format conversions.
- return CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
- &format);
+ return CoreAudioUtil::IsFormatSupported(client.get(),
+ AUDCLNT_SHAREMODE_SHARED, &format);
}
HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
@@ -644,10 +660,10 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
// Some devices don't appear to set a valid channel layout, so guess based on
// the number of channels. See http://crbug.com/311906.
if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
- VLOG(1) << "Unsupported channel config: "
- << std::hex << channel_config
- << ". Guessing layout by channel count: "
- << std::dec << mix_format.Format.nChannels;
+ DVLOG(1) << "Unsupported channel config: "
+ << std::hex << channel_config
+ << ". Guessing layout by channel count: "
+ << std::dec << mix_format.Format.nChannels;
channel_layout = GuessChannelLayout(mix_format.Format.nChannels);
}
@@ -685,13 +701,13 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
EDataFlow data_flow, ERole role, AudioParameters* params) {
DCHECK(IsSupported());
ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
- if (!client) {
+ if (!client.get()) {
// Map NULL-pointer to new error code which can be different from the
// actual error code. The exact value is not important here.
return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
}
- HRESULT hr = GetPreferredAudioParameters(client, params);
+ HRESULT hr = GetPreferredAudioParameters(client.get(), params);
if (FAILED(hr))
return hr;
@@ -710,19 +726,19 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
const std::string& device_id, AudioParameters* params) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
- if (!device) {
+ if (!device.get()) {
// Map NULL-pointer to new error code which can be different from the
// actual error code. The exact value is not important here.
return AUDCLNT_E_DEVICE_INVALIDATED;
}
- ScopedComPtr<IAudioClient> client(CreateClient(device));
- if (!client) {
+ ScopedComPtr<IAudioClient> client(CreateClient(device.get()));
+ if (!client.get()) {
// Map NULL-pointer to new error code which can be different from the
// actual error code. The exact value is not important here.
return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
}
- return GetPreferredAudioParameters(client, params);
+ return GetPreferredAudioParameters(client.get(), params);
}
ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
@@ -731,7 +747,7 @@ ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
CreateClient(device_id, data_flow, eConsole));
WAVEFORMATPCMEX format = {0};
- if (!client || FAILED(GetSharedModeMixFormat(client, &format)))
+ if (!client.get() || FAILED(GetSharedModeMixFormat(client.get(), &format)))
return 0;
return static_cast<ChannelConfig>(format.dwChannelMask);
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index cc8271ecc2d..f337849eafb 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -8,6 +8,7 @@
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
+#include "media/audio/audio_unittest_util.h"
#include "media/audio/win/core_audio_util_win.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -18,30 +19,25 @@ namespace media {
class CoreAudioUtilWinTest : public ::testing::Test {
protected:
- // The test runs on a COM thread in the multithreaded apartment (MTA).
+ // The tests must run on a COM thread.
// If we don't initialize the COM library on a thread before using COM,
// all function calls will return CO_E_NOTINITIALIZED.
- CoreAudioUtilWinTest()
- : com_init_(ScopedCOMInitializer::kMTA) {
+ CoreAudioUtilWinTest() {
DCHECK(com_init_.succeeded());
}
- virtual ~CoreAudioUtilWinTest() {}
-
- bool CanRunAudioTest() {
- bool core_audio = CoreAudioUtil::IsSupported();
- if (!core_audio)
- return false;
- int capture_devices = CoreAudioUtil::NumberOfActiveDevices(eCapture);
- int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
- return ((capture_devices > 0) && (render_devices > 0));
+ ~CoreAudioUtilWinTest() override {}
+
+ bool DevicesAvailable() {
+ return CoreAudioUtil::IsSupported() &&
+ CoreAudioUtil::NumberOfActiveDevices(eCapture) > 0 &&
+ CoreAudioUtil::NumberOfActiveDevices(eRender) > 0;
}
ScopedCOMInitializer com_init_;
};
TEST_F(CoreAudioUtilWinTest, NumberOfActiveDevices) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
EXPECT_GT(render_devices, 0);
@@ -52,17 +48,15 @@ TEST_F(CoreAudioUtilWinTest, NumberOfActiveDevices) {
}
TEST_F(CoreAudioUtilWinTest, CreateDeviceEnumerator) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
ScopedComPtr<IMMDeviceEnumerator> enumerator =
CoreAudioUtil::CreateDeviceEnumerator();
- EXPECT_TRUE(enumerator);
+ EXPECT_TRUE(enumerator.get());
}
TEST_F(CoreAudioUtilWinTest, CreateDefaultDevice) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
struct {
EDataFlow flow;
@@ -81,43 +75,41 @@ TEST_F(CoreAudioUtilWinTest, CreateDefaultDevice) {
for (int i = 0; i < arraysize(data); ++i) {
audio_device =
CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
- EXPECT_TRUE(audio_device);
- EXPECT_EQ(data[i].flow, CoreAudioUtil::GetDataFlow(audio_device));
+ EXPECT_TRUE(audio_device.get());
+ EXPECT_EQ(data[i].flow, CoreAudioUtil::GetDataFlow(audio_device.get()));
}
// Only eRender and eCapture are allowed as flow parameter.
audio_device = CoreAudioUtil::CreateDefaultDevice(eAll, eConsole);
- EXPECT_FALSE(audio_device);
+ EXPECT_FALSE(audio_device.get());
}
TEST_F(CoreAudioUtilWinTest, CreateDevice) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
// Get name and ID of default device used for playback.
ScopedComPtr<IMMDevice> default_render_device =
CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
AudioDeviceName default_render_name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(default_render_device,
- &default_render_name)));
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(
+ default_render_device.get(), &default_render_name)));
// Use the uniqe ID as input to CreateDevice() and create a corresponding
// IMMDevice.
ScopedComPtr<IMMDevice> audio_device =
CoreAudioUtil::CreateDevice(default_render_name.unique_id);
- EXPECT_TRUE(audio_device);
+ EXPECT_TRUE(audio_device.get());
// Verify that the two IMMDevice interfaces represents the same endpoint
// by comparing their unique IDs.
AudioDeviceName device_name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
- &device_name)));
+ EXPECT_TRUE(SUCCEEDED(
+ CoreAudioUtil::GetDeviceName(audio_device.get(), &device_name)));
EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
}
TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceName) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
struct {
EDataFlow flow;
@@ -135,20 +127,19 @@ TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceName) {
for (int i = 0; i < arraysize(data); ++i) {
audio_device =
CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
- &device_name)));
+ EXPECT_TRUE(SUCCEEDED(
+ CoreAudioUtil::GetDeviceName(audio_device.get(), &device_name)));
EXPECT_FALSE(device_name.device_name.empty());
EXPECT_FALSE(device_name.unique_id.empty());
}
}
TEST_F(CoreAudioUtilWinTest, GetAudioControllerID) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
ScopedComPtr<IMMDeviceEnumerator> enumerator(
CoreAudioUtil::CreateDeviceEnumerator());
- ASSERT_TRUE(enumerator);
+ ASSERT_TRUE(enumerator.get());
// Enumerate all active input and output devices and fetch the ID of
// the associated device.
@@ -162,22 +153,21 @@ TEST_F(CoreAudioUtilWinTest, GetAudioControllerID) {
for (UINT j = 0; j < count; ++j) {
ScopedComPtr<IMMDevice> device;
collection->Item(j, device.Receive());
- std::string controller_id(CoreAudioUtil::GetAudioControllerID(
- device, enumerator));
+ std::string controller_id(
+ CoreAudioUtil::GetAudioControllerID(device.get(), enumerator.get()));
EXPECT_FALSE(controller_id.empty());
}
}
}
TEST_F(CoreAudioUtilWinTest, GetFriendlyName) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
// Get name and ID of default device used for recording.
ScopedComPtr<IMMDevice> audio_device =
CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
AudioDeviceName device_name;
- HRESULT hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
+ HRESULT hr = CoreAudioUtil::GetDeviceName(audio_device.get(), &device_name);
EXPECT_TRUE(SUCCEEDED(hr));
// Use unique ID as input to GetFriendlyName() and compare the result
@@ -188,43 +178,41 @@ TEST_F(CoreAudioUtilWinTest, GetFriendlyName) {
// Same test as above but for playback.
audio_device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
+ hr = CoreAudioUtil::GetDeviceName(audio_device.get(), &device_name);
EXPECT_TRUE(SUCCEEDED(hr));
friendly_name = CoreAudioUtil::GetFriendlyName(device_name.unique_id);
EXPECT_EQ(friendly_name, device_name.device_name);
}
TEST_F(CoreAudioUtilWinTest, DeviceIsDefault) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
// Verify that the default render device is correctly identified as a
// default device.
ScopedComPtr<IMMDevice> audio_device =
CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
AudioDeviceName name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
+ EXPECT_TRUE(
+ SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device.get(), &name)));
const std::string id = name.unique_id;
EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eRender, eConsole, id));
EXPECT_FALSE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, id));
}
TEST_F(CoreAudioUtilWinTest, CreateDefaultClient) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
for (int i = 0; i < arraysize(data); ++i) {
ScopedComPtr<IAudioClient> client;
client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(client.get());
}
}
TEST_F(CoreAudioUtilWinTest, CreateClient) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
@@ -232,28 +220,27 @@ TEST_F(CoreAudioUtilWinTest, CreateClient) {
ScopedComPtr<IMMDevice> device;
ScopedComPtr<IAudioClient> client;
device = CoreAudioUtil::CreateDefaultDevice(data[i], eConsole);
- EXPECT_TRUE(device);
- EXPECT_EQ(data[i], CoreAudioUtil::GetDataFlow(device));
- client = CoreAudioUtil::CreateClient(device);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(device.get());
+ EXPECT_EQ(data[i], CoreAudioUtil::GetDataFlow(device.get()));
+ client = CoreAudioUtil::CreateClient(device.get());
+ EXPECT_TRUE(client.get());
}
}
TEST_F(CoreAudioUtilWinTest, GetSharedModeMixFormat) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
ScopedComPtr<IMMDevice> device;
ScopedComPtr<IAudioClient> client;
device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- EXPECT_TRUE(device);
- client = CoreAudioUtil::CreateClient(device);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(device.get());
+ client = CoreAudioUtil::CreateClient(device.get());
+ EXPECT_TRUE(client.get());
// Perform a simple sanity test of the aquired format structure.
WAVEFORMATPCMEX format;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
+ EXPECT_TRUE(
+ SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)));
EXPECT_GE(format.Format.nChannels, 1);
EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
EXPECT_GE(format.Format.wBitsPerSample, 16);
@@ -262,8 +249,7 @@ TEST_F(CoreAudioUtilWinTest, GetSharedModeMixFormat) {
}
TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
// The preferred channel layout should always be supported. Being supported
// means that it is possible to initialize a shared mode stream with the
@@ -289,8 +275,7 @@ TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
}
TEST_F(CoreAudioUtilWinTest, GetDevicePeriod) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
@@ -301,20 +286,19 @@ TEST_F(CoreAudioUtilWinTest, GetDevicePeriod) {
REFERENCE_TIME shared_time_period = 0;
REFERENCE_TIME exclusive_time_period = 0;
client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(client.get());
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
- client, AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+ client.get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
EXPECT_GT(shared_time_period, 0);
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
- client, AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+ client.get(), AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
EXPECT_GT(exclusive_time_period, 0);
EXPECT_LE(exclusive_time_period, shared_time_period);
}
}
TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
@@ -324,42 +308,41 @@ TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
ScopedComPtr<IAudioClient> client;
AudioParameters params;
client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(client,
- &params)));
+ EXPECT_TRUE(client.get());
+ EXPECT_TRUE(SUCCEEDED(
+ CoreAudioUtil::GetPreferredAudioParameters(client.get(), &params)));
EXPECT_TRUE(params.IsValid());
}
}
TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
ScopedComPtr<IAudioClient> client;
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(client.get());
WAVEFORMATPCMEX format;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
+ EXPECT_TRUE(
+ SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)));
// Perform a shared-mode initialization without event-driven buffer handling.
uint32 endpoint_buffer_size = 0;
- HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ HRESULT hr = CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
// It is only possible to create a client once.
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ hr = CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
EXPECT_FALSE(SUCCEEDED(hr));
EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
// Verify that it is possible to reinitialize the client after releasing it.
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ EXPECT_TRUE(client.get());
+ hr = CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -368,11 +351,11 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
// A simple way to emulate an invalid format is to use the shared-mode
// mixing format and modify the preferred sample.
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
+ EXPECT_TRUE(client.get());
format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
- client, AUDCLNT_SHAREMODE_SHARED, &format));
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ client.get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
EXPECT_TRUE(FAILED(hr));
EXPECT_EQ(hr, E_INVALIDARG);
@@ -383,20 +366,19 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
// The event handle should be in the nonsignaled state.
base::win::ScopedHandle event_handle(::CreateEvent(NULL, TRUE, FALSE, NULL));
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
+ EXPECT_TRUE(client.get());
+ EXPECT_TRUE(
+ SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)));
EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
- client, AUDCLNT_SHAREMODE_SHARED, &format));
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
- &endpoint_buffer_size, NULL);
+ client.get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = CoreAudioUtil::SharedModeInitialize(
+ client.get(), &format, event_handle.Get(), &endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
}
TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
@@ -409,57 +391,56 @@ TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
ScopedComPtr<IAudioCaptureClient> capture_client;
client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
+ EXPECT_TRUE(client.get());
+ EXPECT_TRUE(SUCCEEDED(
+ CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)));
if (data[i] == eRender) {
// It is not possible to create a render client using an unitialized
// client interface.
- render_client = CoreAudioUtil::CreateRenderClient(client);
- EXPECT_FALSE(render_client);
+ render_client = CoreAudioUtil::CreateRenderClient(client.get());
+ EXPECT_FALSE(render_client.get());
// Do a proper initialization and verify that it works this time.
- CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
- render_client = CoreAudioUtil::CreateRenderClient(client);
- EXPECT_TRUE(render_client);
+ render_client = CoreAudioUtil::CreateRenderClient(client.get());
+ EXPECT_TRUE(render_client.get());
EXPECT_GT(endpoint_buffer_size, 0u);
} else if (data[i] == eCapture) {
// It is not possible to create a capture client using an unitialized
// client interface.
- capture_client = CoreAudioUtil::CreateCaptureClient(client);
- EXPECT_FALSE(capture_client);
+ capture_client = CoreAudioUtil::CreateCaptureClient(client.get());
+ EXPECT_FALSE(capture_client.get());
// Do a proper initialization and verify that it works this time.
- CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
- capture_client = CoreAudioUtil::CreateCaptureClient(client);
- EXPECT_TRUE(capture_client);
+ capture_client = CoreAudioUtil::CreateCaptureClient(client.get());
+ EXPECT_TRUE(capture_client.get());
EXPECT_GT(endpoint_buffer_size, 0u);
}
}
}
TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
// Create default clients using the default mixing format for shared mode.
ScopedComPtr<IAudioClient> client(
CoreAudioUtil::CreateDefaultClient(eRender, eConsole));
- EXPECT_TRUE(client);
+ EXPECT_TRUE(client.get());
WAVEFORMATPCMEX format;
uint32 endpoint_buffer_size = 0;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
- CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ EXPECT_TRUE(
+ SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client.get(), &format)));
+ CoreAudioUtil::SharedModeInitialize(client.get(), &format, NULL,
&endpoint_buffer_size, NULL);
EXPECT_GT(endpoint_buffer_size, 0u);
ScopedComPtr<IAudioRenderClient> render_client(
- CoreAudioUtil::CreateRenderClient(client));
- EXPECT_TRUE(render_client);
+ CoreAudioUtil::CreateRenderClient(client.get()));
+ EXPECT_TRUE(render_client.get());
// The endpoint audio buffer should not be filled up by default after being
// created.
@@ -472,24 +453,21 @@ TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
// since we can't access data that has already been sent to the endpoint
// buffer.
EXPECT_TRUE(CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- client, render_client));
+ client.get(), render_client.get()));
client->GetCurrentPadding(&num_queued_frames);
EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
}
-// This test can only succeed on a machine that has audio hardware
-// that has both input and output devices. Currently this is the case
-// with our test bots and the CanRunAudioTest() method should make sure
-// that the test won't run in unsupported environments, but be warned.
+// This test can only run on a machine that has audio hardware
+// that has both input and output devices.
TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
bool found_a_pair = false;
ScopedComPtr<IMMDeviceEnumerator> enumerator(
CoreAudioUtil::CreateDeviceEnumerator());
- ASSERT_TRUE(enumerator);
+ ASSERT_TRUE(enumerator.get());
// Enumerate all active input and output devices and fetch the ID of
// the associated device.
@@ -512,8 +490,7 @@ TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
}
TEST_F(CoreAudioUtilWinTest, GetDefaultOutputDeviceID) {
- if (!CanRunAudioTest())
- return;
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
std::string default_device_id(CoreAudioUtil::GetDefaultOutputDeviceID());
EXPECT_FALSE(default_device_id.empty());
diff --git a/chromium/media/audio/win/wavein_input_win.cc b/chromium/media/audio/win/wavein_input_win.cc
index 72c58417bd3..6543ee5e212 100644
--- a/chromium/media/audio/win/wavein_input_win.cc
+++ b/chromium/media/audio/win/wavein_input_win.cc
@@ -215,9 +215,10 @@ double PCMWaveInAudioInputStream::GetVolume() {
return 0.0;
}
-void PCMWaveInAudioInputStream::SetAutomaticGainControl(bool enabled) {
+bool PCMWaveInAudioInputStream::SetAutomaticGainControl(bool enabled) {
// TODO(henrika): Add AGC support when volume control has been added.
NOTIMPLEMENTED();
+ return false;
}
bool PCMWaveInAudioInputStream::GetAutomaticGainControl() {
diff --git a/chromium/media/audio/win/wavein_input_win.h b/chromium/media/audio/win/wavein_input_win.h
index 8c22c9b5896..a2c77c34c48 100644
--- a/chromium/media/audio/win/wavein_input_win.h
+++ b/chromium/media/audio/win/wavein_input_win.h
@@ -32,20 +32,20 @@ class PCMWaveInAudioInputStream : public AudioInputStream {
const AudioParameters& params,
int num_buffers,
const std::string& device_id);
- virtual ~PCMWaveInAudioInputStream();
+ ~PCMWaveInAudioInputStream() override;
// Implementation of AudioInputStream.
- virtual bool Open() override;
- virtual void Start(AudioInputCallback* callback) override;
- virtual void Stop() override;
- virtual void Close() override;
+ bool Open() override;
+ void Start(AudioInputCallback* callback) override;
+ void Stop() override;
+ void Close() override;
// TODO(henrika): Add volume support using the Audio Mixer API.
- virtual double GetMaxVolume() override;
- virtual void SetVolume(double volume) override;
- virtual double GetVolume() override;
- virtual void SetAutomaticGainControl(bool enabled) override;
- virtual bool GetAutomaticGainControl() override;
- virtual bool IsMuted() override;
+ double GetMaxVolume() override;
+ void SetVolume(double volume) override;
+ double GetVolume() override;
+ bool SetAutomaticGainControl(bool enabled) override;
+ bool GetAutomaticGainControl() override;
+ bool IsMuted() override;
private:
enum State {
diff --git a/chromium/media/audio/win/waveout_output_win.cc b/chromium/media/audio/win/waveout_output_win.cc
index 9e9e46ad16b..7f35065ab37 100644
--- a/chromium/media/audio/win/waveout_output_win.cc
+++ b/chromium/media/audio/win/waveout_output_win.cc
@@ -4,14 +4,12 @@
#include "media/audio/win/waveout_output_win.h"
-#include <windows.h>
-#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")
#include "base/atomicops.h"
#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/audio_io.h"
#include "media/audio/win/audio_manager_win.h"
diff --git a/chromium/media/audio/win/waveout_output_win.h b/chromium/media/audio/win/waveout_output_win.h
index 5c7009d0971..bd9da5edc7b 100644
--- a/chromium/media/audio/win/waveout_output_win.h
+++ b/chromium/media/audio/win/waveout_output_win.h
@@ -38,15 +38,15 @@ class PCMWaveOutAudioOutputStream : public AudioOutputStream {
const AudioParameters& params,
int num_buffers,
UINT device_id);
- virtual ~PCMWaveOutAudioOutputStream();
+ ~PCMWaveOutAudioOutputStream() override;
// Implementation of AudioOutputStream.
- virtual bool Open();
- virtual void Close();
- virtual void Start(AudioSourceCallback* callback);
- virtual void Stop();
- virtual void SetVolume(double volume);
- virtual void GetVolume(double* volume);
+ bool Open() override;
+ void Close() override;
+ void Start(AudioSourceCallback* callback) override;
+ void Stop() override;
+ void SetVolume(double volume) override;
+ void GetVolume(double* volume) override;
// Sends a buffer to the audio driver for playback.
void QueueNextPacket(WAVEHDR* buffer);
diff --git a/chromium/media/audio_unittests.isolate b/chromium/media/audio_unittests.isolate
new file mode 100644
index 00000000000..d91c6360b48
--- /dev/null
+++ b/chromium/media/audio_unittests.isolate
@@ -0,0 +1,70 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ '../base/base.isolate',
+ ],
+ 'conditions': [
+ ['use_x11==0', {
+ 'variables': {
+ 'command': [
+ '../testing/test_env.py',
+ '<(PRODUCT_DIR)/audio_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ }],
+ ['use_x11==1', {
+ 'variables': {
+ 'command': [
+ '../testing/xvfb.py',
+ '<(PRODUCT_DIR)',
+ '<(PRODUCT_DIR)/audio_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ 'files': [
+ '../testing/xvfb.py',
+ '<(PRODUCT_DIR)/xdisplaycheck',
+ ],
+ },
+ }],
+ ['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ 'test/data/',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../testing/test_env.py',
+ '<(PRODUCT_DIR)/audio_unittests<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/audio_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/audio_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+}
diff --git a/chromium/media/base/BUILD.gn b/chromium/media/base/BUILD.gn
index 1e1fa4bd1ec..2474ff9c68d 100644
--- a/chromium/media/base/BUILD.gn
+++ b/chromium/media/base/BUILD.gn
@@ -4,22 +4,24 @@
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
+import("//build/config/features.gni")
import("//build/config/ui.gni")
import("//build/config/linux/pkg_config.gni")
import("//media/media_options.gni")
source_set("base") {
- visibility = [ "//media/*" ]
+ # This is part of the media component.
+ visibility = [ "//media" ]
sources = [
"audio_block_fifo.cc",
"audio_block_fifo.h",
"audio_buffer.cc",
"audio_buffer.h",
+ "audio_buffer_converter.cc",
+ "audio_buffer_converter.h",
"audio_buffer_queue.cc",
"audio_buffer_queue.h",
"audio_capturer_source.h",
- "audio_buffer_converter.cc",
- "audio_buffer_converter.h",
"audio_converter.cc",
"audio_converter.h",
"audio_decoder.cc",
@@ -43,6 +45,8 @@ source_set("base") {
"audio_renderer_mixer_input.cc",
"audio_renderer_mixer_input.h",
"audio_renderer_sink.h",
+ "audio_shifter.cc",
+ "audio_shifter.h",
"audio_splicer.cc",
"audio_splicer.h",
"audio_timestamp_helper.cc",
@@ -59,10 +63,16 @@ source_set("base") {
"byte_queue.h",
"cdm_callback_promise.cc",
"cdm_callback_promise.h",
+ "cdm_context.cc",
+ "cdm_context.h",
"cdm_factory.cc",
"cdm_factory.h",
+ "cdm_key_information.cc",
+ "cdm_key_information.h",
"cdm_promise.cc",
"cdm_promise.h",
+ "cdm_promise_adapter.cc",
+ "cdm_promise_adapter.h",
"channel_mixer.cc",
"channel_mixer.h",
"channel_mixing_matrix.cc",
@@ -91,15 +101,26 @@ source_set("base") {
"eme_constants.h",
"key_system_info.cc",
"key_system_info.h",
+ "key_systems.cc",
+ "key_systems.h",
+ "key_systems.h",
+ "key_systems_support_uma.cc",
+ "key_systems_support_uma.h",
"media.cc",
"media.h",
+ "media_client.cc",
+ "media_client.h",
"media_keys.cc",
"media_keys.h",
"media_log.cc",
"media_log.h",
"media_log_event.h",
+ "media_permission.cc",
+ "media_permission.h",
"media_switches.cc",
"media_switches.h",
+ "moving_average.cc",
+ "moving_average.h",
"multi_channel_resampler.cc",
"multi_channel_resampler.h",
"pipeline.cc",
@@ -111,9 +132,10 @@ source_set("base") {
"ranges.h",
"renderer.cc",
"renderer.h",
+ "renderer_factory.cc",
+ "renderer_factory.h",
"sample_format.cc",
"sample_format.h",
- "scoped_histogram_timer.h",
"seekable_buffer.cc",
"seekable_buffer.h",
"serial_runner.cc",
@@ -124,8 +146,6 @@ source_set("base") {
"simd/convert_yuv_to_rgb_c.cc",
"simd/filter_yuv.h",
"simd/filter_yuv_c.cc",
- "simd/yuv_to_rgb_table.cc",
- "simd/yuv_to_rgb_table.h",
"sinc_resampler.cc",
"sinc_resampler.h",
"stream_parser.cc",
@@ -146,12 +166,18 @@ source_set("base") {
"time_source.h",
"user_input_monitor.cc",
"user_input_monitor.h",
+ "video_capture_types.cc",
+ "video_capture_types.h",
+ "video_capturer_source.cc",
+ "video_capturer_source.h",
"video_decoder.cc",
"video_decoder.h",
"video_decoder_config.cc",
"video_decoder_config.h",
"video_frame.cc",
"video_frame.h",
+ "video_frame_metadata.cc",
+ "video_frame_metadata.h",
"video_frame_pool.cc",
"video_frame_pool.h",
"video_renderer.cc",
@@ -164,10 +190,19 @@ source_set("base") {
"yuv_convert.cc",
"yuv_convert.h",
]
+
+ allow_circular_includes_from = []
defines = []
- deps = [ "//skia" ]
+ public_deps = []
+ deps = [
+ "//ui/events:events_base",
+ "//skia",
+ ]
libs = []
- configs += [ "//media:media_config" ]
+ configs += [
+ "//media:media_config",
+ "//media:media_implementation",
+ ]
if (media_use_ffmpeg) {
sources += [
@@ -178,6 +213,12 @@ source_set("base") {
"media_file_checker.cc",
"media_file_checker.h",
]
+ if (is_win) {
+ sources += [ "media_win.cc" ]
+ } else if (is_posix) {
+ sources += [ "media_posix.cc" ]
+ }
+
deps += [ "//third_party/ffmpeg" ]
}
@@ -185,26 +226,31 @@ source_set("base") {
sources += [
"browser_cdm.cc",
"browser_cdm.h",
+ "browser_cdm_factory.cc",
"browser_cdm_factory.h",
]
}
if (is_android) {
sources += [ "media_stub.cc" ]
- } else if (is_win) {
- sources += [ "media_win.cc" ]
- } else if (is_posix) {
- sources += [ "media_posix.cc" ]
+ public_deps = [
+ "//media/base/android",
+ "//media/base/android:media_java",
+ "//media/base/android:media_jni_headers",
+ "//media/base/android:video_capture_jni_headers",
+ ]
+ allow_circular_includes_from += [ "//media/base/android" ]
}
if (is_linux && use_x11) {
configs += [
"//build/config/linux:x11",
"//build/config/linux:xext",
- # TODO(ajwong): Why does xent get a separate thing in //build/config/linux:BUILD.gn
- # "//build/config/linux:xdamage",
- # "//build/config/linux:xfixes",
- # "//build/config/linux:xtst",
+
+ # TODO(ajwong): Why does xent get a separate thing in //build/config/linux:BUILD.gn
+ # "//build/config/linux:xdamage",
+ # "//build/config/linux:xfixes",
+ # "//build/config/linux:xtst",
]
sources += [ "user_input_monitor_linux.cc" ]
deps += [
@@ -213,13 +259,16 @@ source_set("base") {
]
} else if (is_mac) {
sources += [ "user_input_monitor_mac.cc" ]
+
+ # Required by video_frame.cc.
+ libs = [ "CoreVideo.framework" ]
} else if (is_win) {
sources += [ "user_input_monitor_win.cc" ]
} else {
defines += [ "DISABLE_USER_INPUT_MONITOR" ]
}
- if (cpu_arch == "x86" || cpu_arch == "x64") {
+ if (current_cpu == "x86" || current_cpu == "x64") {
sources += [ "simd/convert_yuv_to_rgb_x86.cc" ]
deps += [
":media_yasm",
@@ -227,6 +276,8 @@ source_set("base") {
]
}
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
if (is_linux || is_win) {
sources += [
"keyboard_event_counter.cc",
@@ -240,13 +291,30 @@ config("base_for_cast_ios_dependent_config") {
defines = [ "MEDIA_FOR_CAST_IOS" ]
}
-source_set("base_for_cast_ios") {
- sources = [
- "video_frame.cc",
- "video_frame.h",
- ]
- configs += [ "//media:media_config" ]
- all_dependent_configs = [ ":base_for_cast_ios_dependent_config" ]
+if (is_ios) {
+ source_set("base_for_cast_ios") {
+ sources = [
+ "simd/convert_rgb_to_yuv.h",
+ "simd/convert_rgb_to_yuv_c.cc",
+ "simd/convert_yuv_to_rgb.h",
+ "simd/convert_yuv_to_rgb_c.cc",
+ "simd/filter_yuv.h",
+ "simd/filter_yuv_c.cc",
+ "video_frame.cc",
+ "video_frame.h",
+ "video_frame_metadata.cc",
+ "video_frame_metadata.h",
+ "video_util.cc",
+ "video_util.h",
+ "yuv_convert.cc",
+ "yuv_convert.h",
+ ]
+ configs += [
+ "//build/config/compiler:no_size_t_to_int_warning",
+ "//media:media_config",
+ ]
+ all_dependent_configs = [ ":base_for_cast_ios_dependent_config" ]
+ }
}
source_set("test_support") {
@@ -256,6 +324,8 @@ source_set("test_support") {
"fake_audio_render_callback.h",
"fake_audio_renderer_sink.cc",
"fake_audio_renderer_sink.h",
+ "fake_demuxer_stream.cc",
+ "fake_demuxer_stream.h",
"fake_text_track_stream.cc",
"fake_text_track_stream.h",
"gmock_callback_support.h",
@@ -265,13 +335,17 @@ source_set("test_support") {
"mock_demuxer_host.h",
"mock_filters.cc",
"mock_filters.h",
+ "null_video_sink.cc",
+ "null_video_sink.h",
"test_data_util.cc",
"test_data_util.h",
"test_helpers.cc",
"test_helpers.h",
]
configs += [ "//media:media_config" ]
- deps = [ "//testing/gmock" ]
+ deps = [
+ "//testing/gmock",
+ ]
}
source_set("unittests") {
@@ -279,8 +353,8 @@ source_set("unittests") {
sources = [
"audio_block_fifo_unittest.cc",
"audio_buffer_converter_unittest.cc",
- "audio_buffer_unittest.cc",
"audio_buffer_queue_unittest.cc",
+ "audio_buffer_unittest.cc",
"audio_bus_unittest.cc",
"audio_converter_unittest.cc",
"audio_discard_helper_unittest.cc",
@@ -290,6 +364,7 @@ source_set("unittests") {
"audio_pull_fifo_unittest.cc",
"audio_renderer_mixer_input_unittest.cc",
"audio_renderer_mixer_unittest.cc",
+ "audio_shifter_unittest.cc",
"audio_splicer_unittest.cc",
"audio_timestamp_helper_unittest.cc",
"bind_to_current_loop_unittest.cc",
@@ -302,14 +377,17 @@ source_set("unittests") {
"decoder_buffer_queue_unittest.cc",
"decoder_buffer_unittest.cc",
"djb2_unittest.cc",
+ "fake_demuxer_stream_unittest.cc",
"gmock_callback_support_unittest.cc",
+ "key_systems_unittest.cc",
+ "moving_average_unittest.cc",
"multi_channel_resampler_unittest.cc",
+ "null_video_sink_unittest.cc",
"pipeline_unittest.cc",
"ranges_unittest.cc",
"run_all_unittests.cc",
- "scoped_histogram_timer_unittest.cc",
- "serial_runner_unittest.cc",
"seekable_buffer_unittest.cc",
+ "serial_runner_unittest.cc",
"sinc_resampler_unittest.cc",
"stream_parser_unittest.cc",
"text_ranges_unittest.cc",
@@ -317,16 +395,20 @@ source_set("unittests") {
"user_input_monitor_unittest.cc",
"vector_math_testing.h",
"vector_math_unittest.cc",
- "video_frame_unittest.cc",
"video_frame_pool_unittest.cc",
+ "video_frame_unittest.cc",
"video_util_unittest.cc",
"wall_clock_time_source_unittest.cc",
"yuv_convert_unittest.cc",
]
- configs += [ "//media:media_config" ]
+ configs += [
+ "//build/config/compiler:no_size_t_to_int_warning",
+ "//media:media_config",
+ ]
deps = [
- ":base",
":test_support",
+ "//gpu/command_buffer/common",
+ "//media",
"//skia",
"//testing/gmock",
"//testing/gtest",
@@ -340,14 +422,12 @@ source_set("unittests") {
}
if (!is_android) {
- sources += [
- "container_names_unittest.cc",
- ]
+ sources += [ "container_names_unittest.cc" ]
} else {
deps += [ "//ui/gl" ]
}
- if (cpu_arch == "x86" || cpu_arch == "x64") {
+ if (current_cpu == "x86" || current_cpu == "x64") {
sources += [ "simd/convert_rgb_to_yuv_unittest.cc" ]
}
}
@@ -364,31 +444,32 @@ source_set("perftests") {
]
configs += [ "//media:media_config" ]
deps = [
- ":base",
":test_support",
+ "//media",
"//testing/gmock",
"//testing/gtest",
]
if (media_use_ffmpeg) {
- sources += [
- "demuxer_perftest.cc",
- ]
+ sources += [ "demuxer_perftest.cc" ]
}
if (is_android) {
- deps += [ "//ui/gl"]
+ deps += [ "//ui/gl" ]
}
}
-if (cpu_arch == "x86" || cpu_arch == "x64") {
+if (current_cpu == "x86" || current_cpu == "x64") {
source_set("media_sse2") {
sources = [
"simd/convert_rgb_to_yuv_sse2.cc",
"simd/convert_rgb_to_yuv_ssse3.cc",
"simd/filter_yuv_sse2.cc",
]
- configs += [ "//media:media_config" ]
+ configs += [
+ "//media:media_config",
+ "//media:media_implementation",
+ ]
if (!is_win) {
cflags = [ "-msse2" ]
}
@@ -411,9 +492,11 @@ if (cpu_arch == "x86" || cpu_arch == "x64") {
yasm_flags = [
"-DCHROMIUM",
"-DEXPORT_SYMBOLS",
+
# In addition to the same path as source asm, let yasm %include
# search path be relative to src/ per Chromium policy.
- "-I", rebase_path("..", root_build_dir),
+ "-I",
+ rebase_path("..", root_build_dir),
]
inputs = [
@@ -426,9 +509,9 @@ if (cpu_arch == "x86" || cpu_arch == "x64") {
"simd/scale_yuv_to_rgb_mmx.inc",
]
- if (cpu_arch == "x86") {
+ if (current_cpu == "x86") {
yasm_flags += [ "-DARCH_X86_32" ]
- } else if (cpu_arch == "x64") {
+ } else if (current_cpu == "x64") {
yasm_flags += [ "-DARCH_X86_64" ]
sources += [
"simd/linear_scale_yuv_to_rgb_mmx_x64.asm",
@@ -444,8 +527,8 @@ if (cpu_arch == "x86" || cpu_arch == "x64") {
} else {
if (is_posix) {
yasm_flags += [ "-DELF" ]
- if (cpu_arch == "x64") {
- # TODO(ajwong): Why isn't this true in mac?
+ if (current_cpu == "x64") {
+ # TODO(ajwong): Why isn't this true in mac?
yasm_flags += [ "-DPIC" ]
}
}
diff --git a/chromium/media/base/android/BUILD.gn b/chromium/media/base/android/BUILD.gn
index fb834d93e69..b9853b0bd94 100644
--- a/chromium/media/base/android/BUILD.gn
+++ b/chromium/media/base/android/BUILD.gn
@@ -15,15 +15,22 @@ source_set("android") {
"audio_decoder_job.cc",
"audio_decoder_job.h",
"browser_cdm_factory_android.cc",
+ "browser_cdm_factory_android.h",
"demuxer_android.h",
"demuxer_stream_player_params.cc",
"demuxer_stream_player_params.h",
+ "media_client_android.cc",
+ "media_client_android.h",
"media_codec_bridge.cc",
"media_codec_bridge.h",
+ "media_codec_player.cc",
+ "media_codec_player.h",
"media_decoder_job.cc",
"media_decoder_job.h",
"media_drm_bridge.cc",
"media_drm_bridge.h",
+ "media_drm_bridge_delegate.cc",
+ "media_drm_bridge_delegate.h",
"media_jni_registrar.cc",
"media_jni_registrar.h",
"media_player_android.cc",
@@ -44,11 +51,13 @@ source_set("android") {
"webaudio_media_codec_bridge.h",
"webaudio_media_codec_info.h",
]
- configs += [ "//media:media_config" ]
+ configs += [
+ "//media:media_config",
+ "//media:media_implementation",
+ ]
deps = [
":media_jni_headers",
"//media:shared_memory_support",
- "//media/base",
"//third_party/widevine/cdm:version_h",
"//ui/gl",
"//url",
@@ -80,8 +89,6 @@ generate_jni("media_jni_headers") {
"java/src/org/chromium/media/MediaDrmBridge.java",
"java/src/org/chromium/media/MediaPlayerBridge.java",
"java/src/org/chromium/media/MediaPlayerListener.java",
- "java/src/org/chromium/media/UsbMidiDeviceAndroid.java",
- "java/src/org/chromium/media/UsbMidiDeviceFactoryAndroid.java",
"java/src/org/chromium/media/WebAudioMediaCodecBridge.java",
]
jni_package = "media"
@@ -98,9 +105,11 @@ generate_jni("video_capture_jni_headers") {
java_cpp_enum("media_java_enums_srcjar") {
sources = [
"//media/video/capture/android/video_capture_device_android.h",
+ "//media/video/capture/video_capture_device.h",
]
outputs = [
"org/chromium/media/AndroidImageFormat.java",
+ "org/chromium/media/CaptureApiType.java",
]
}
@@ -109,9 +118,7 @@ android_library("media_java") {
"//base:base_java",
]
- srcjar_deps = [
- ":media_java_enums_srcjar",
- ]
+ srcjar_deps = [ ":media_java_enums_srcjar" ]
DEPRECATED_java_in_dir = "java/src"
}
diff --git a/chromium/media/base/android/audio_decoder_job.cc b/chromium/media/base/android/audio_decoder_job.cc
index e3769200abd..25de5a145d6 100644
--- a/chromium/media/base/android/audio_decoder_job.cc
+++ b/chromium/media/base/android/audio_decoder_job.cc
@@ -40,9 +40,11 @@ AudioDecoderJob::AudioDecoderJob(
on_demuxer_config_changed_cb),
audio_codec_(kUnknownAudioCodec),
num_channels_(0),
- sampling_rate_(0),
+ config_sampling_rate_(0),
volume_(-1.0),
- bytes_per_frame_(0) {
+ bytes_per_frame_(0),
+ output_sampling_rate_(0),
+ frame_count_(0) {
}
AudioDecoderJob::~AudioDecoderJob() {}
@@ -51,15 +53,24 @@ bool AudioDecoderJob::HasStream() const {
return audio_codec_ != kUnknownAudioCodec;
}
+void AudioDecoderJob::Flush() {
+ MediaDecoderJob::Flush();
+ frame_count_ = 0;
+}
+
void AudioDecoderJob::SetDemuxerConfigs(const DemuxerConfigs& configs) {
// TODO(qinmin): split DemuxerConfig for audio and video separately so we
// can simply store the stucture here.
audio_codec_ = configs.audio_codec;
num_channels_ = configs.audio_channels;
- sampling_rate_ = configs.audio_sampling_rate;
+ config_sampling_rate_ = configs.audio_sampling_rate;
set_is_content_encrypted(configs.is_audio_encrypted);
audio_extra_data_ = configs.audio_extra_data;
+ audio_codec_delay_ns_ = configs.audio_codec_delay_ns;
+ audio_seek_preroll_ns_ = configs.audio_seek_preroll_ns;
bytes_per_frame_ = kBytesPerAudioOutputSample * num_channels_;
+ if (!media_codec_bridge_)
+ output_sampling_rate_ = config_sampling_rate_;
}
void AudioDecoderJob::SetVolume(double volume) {
@@ -74,6 +85,14 @@ void AudioDecoderJob::SetBaseTimestamp(base::TimeDelta base_timestamp) {
audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_);
}
+void AudioDecoderJob::ResetTimestampHelper() {
+ if (audio_timestamp_helper_)
+ base_timestamp_ = audio_timestamp_helper_->GetTimestamp();
+ audio_timestamp_helper_.reset(
+ new AudioTimestampHelper(output_sampling_rate_));
+ audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_);
+}
+
void AudioDecoderJob::ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
@@ -85,9 +104,10 @@ void AudioDecoderJob::ReleaseOutputBuffer(
int64 head_position = (static_cast<AudioCodecBridge*>(
media_codec_bridge_.get()))->PlayOutputBuffer(
output_buffer_index, size);
- audio_timestamp_helper_->AddFrames(size / bytes_per_frame_);
- int64 frames_to_play =
- audio_timestamp_helper_->frame_count() - head_position;
+ size_t new_frames_count = size / bytes_per_frame_;
+ frame_count_ += new_frames_count;
+ audio_timestamp_helper_->AddFrames(new_frames_count);
+ int64 frames_to_play = frame_count_ - head_position;
DCHECK_GE(frames_to_play, 0);
current_presentation_timestamp =
audio_timestamp_helper_->GetTimestamp() -
@@ -109,7 +129,7 @@ bool AudioDecoderJob::AreDemuxerConfigsChanged(
const DemuxerConfigs& configs) const {
return audio_codec_ != configs.audio_codec ||
num_channels_ != configs.audio_channels ||
- sampling_rate_ != configs.audio_sampling_rate ||
+ config_sampling_rate_ != configs.audio_sampling_rate ||
is_content_encrypted() != configs.is_audio_encrypted ||
audio_extra_data_.size() != configs.audio_extra_data.size() ||
!std::equal(audio_extra_data_.begin(),
@@ -117,26 +137,27 @@ bool AudioDecoderJob::AreDemuxerConfigsChanged(
configs.audio_extra_data.begin());
}
-bool AudioDecoderJob::CreateMediaCodecBridgeInternal() {
+MediaDecoderJob::MediaDecoderJobStatus
+ AudioDecoderJob::CreateMediaCodecBridgeInternal() {
media_codec_bridge_.reset(AudioCodecBridge::Create(audio_codec_));
if (!media_codec_bridge_)
- return false;
+ return STATUS_FAILURE;
if (!(static_cast<AudioCodecBridge*>(media_codec_bridge_.get()))->Start(
- audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
- audio_extra_data_.size(), true, GetMediaCrypto().obj())) {
+ audio_codec_, config_sampling_rate_, num_channels_, &audio_extra_data_[0],
+ audio_extra_data_.size(), audio_codec_delay_ns_, audio_seek_preroll_ns_,
+ true, GetMediaCrypto().obj())) {
media_codec_bridge_.reset();
- return false;
+ return STATUS_FAILURE;
}
SetVolumeInternal();
- // Need to pass the base timestamp to the new decoder.
- if (audio_timestamp_helper_)
- base_timestamp_ = audio_timestamp_helper_->GetTimestamp();
- audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
- audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_);
- return true;
+ // Reset values used to track codec bridge output
+ frame_count_ = 0;
+ ResetTimestampHelper();
+
+ return STATUS_SUCCESS;
}
void AudioDecoderJob::SetVolumeInternal() {
@@ -146,4 +167,13 @@ void AudioDecoderJob::SetVolumeInternal() {
}
}
+void AudioDecoderJob::OnOutputFormatChanged() {
+ DCHECK(media_codec_bridge_);
+
+ int old_sampling_rate = output_sampling_rate_;
+ output_sampling_rate_ = media_codec_bridge_->GetOutputSamplingRate();
+ if (output_sampling_rate_ != old_sampling_rate)
+ ResetTimestampHelper();
+}
+
} // namespace media
diff --git a/chromium/media/base/android/audio_decoder_job.h b/chromium/media/base/android/audio_decoder_job.h
index 9bc293dd2f8..0a7523fa778 100644
--- a/chromium/media/base/android/audio_decoder_job.h
+++ b/chromium/media/base/android/audio_decoder_job.h
@@ -25,42 +25,54 @@ class AudioDecoderJob : public MediaDecoderJob {
// demuxer config has changed.
AudioDecoderJob(const base::Closure& request_data_cb,
const base::Closure& on_demuxer_config_changed_cb);
- virtual ~AudioDecoderJob();
+ ~AudioDecoderJob() override;
// MediaDecoderJob implementation.
- virtual bool HasStream() const override;
- virtual void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
+ bool HasStream() const override;
+ void Flush() override;
+ void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
// Sets the volume of the audio output.
void SetVolume(double volume);
+ double volume() const { return volume_; }
// Sets the base timestamp for |audio_timestamp_helper_|.
void SetBaseTimestamp(base::TimeDelta base_timestamp);
private:
// MediaDecoderJob implementation.
- virtual void ReleaseOutputBuffer(
+ void ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) override;
- virtual bool ComputeTimeToRender() const override;
- virtual bool AreDemuxerConfigsChanged(
- const DemuxerConfigs& configs) const override;
- virtual bool CreateMediaCodecBridgeInternal() override;
+ bool ComputeTimeToRender() const override;
+ bool AreDemuxerConfigsChanged(const DemuxerConfigs& configs) const override;
+ MediaDecoderJobStatus CreateMediaCodecBridgeInternal() override;
+ void OnOutputFormatChanged() override;
// Helper method to set the audio output volume.
void SetVolumeInternal();
+ void ResetTimestampHelper();
+
// Audio configs from the demuxer.
AudioCodec audio_codec_;
int num_channels_;
- int sampling_rate_;
+ int config_sampling_rate_;
std::vector<uint8> audio_extra_data_;
+ int64 audio_codec_delay_ns_;
+ int64 audio_seek_preroll_ns_;
double volume_;
int bytes_per_frame_;
+ // Audio output sample rate
+ int output_sampling_rate_;
+
+ // Frame count to sync with audio codec output
+ int64 frame_count_;
+
// Base timestamp for the |audio_timestamp_helper_|.
base::TimeDelta base_timestamp_;
diff --git a/chromium/media/base/android/browser_cdm_factory_android.cc b/chromium/media/base/android/browser_cdm_factory_android.cc
index d43c4af3bee..82f11d1385f 100644
--- a/chromium/media/base/android/browser_cdm_factory_android.cc
+++ b/chromium/media/base/android/browser_cdm_factory_android.cc
@@ -2,49 +2,56 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/base/browser_cdm_factory.h"
+#include "media/base/android/browser_cdm_factory_android.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/media_switches.h"
+#include "third_party/widevine/cdm/widevine_cdm_common.h"
namespace media {
-scoped_ptr<BrowserCdm> CreateBrowserCdm(
+scoped_ptr<BrowserCdm> BrowserCdmFactoryAndroid::CreateBrowserCdm(
const std::string& key_system,
- const BrowserCdm::SessionCreatedCB& session_created_cb,
- const BrowserCdm::SessionMessageCB& session_message_cb,
- const BrowserCdm::SessionReadyCB& session_ready_cb,
- const BrowserCdm::SessionClosedCB& session_closed_cb,
- const BrowserCdm::SessionErrorCB& session_error_cb) {
+ bool use_hw_secure_codecs,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb) {
if (!MediaDrmBridge::IsKeySystemSupported(key_system)) {
NOTREACHED() << "Unsupported key system: " << key_system;
return scoped_ptr<BrowserCdm>();
}
- scoped_ptr<MediaDrmBridge> cdm(MediaDrmBridge::Create(key_system,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb));
+ scoped_ptr<MediaDrmBridge> cdm(
+ MediaDrmBridge::Create(key_system, session_message_cb, session_closed_cb,
+ legacy_session_error_cb, session_keys_change_cb,
+ session_expiration_update_cb));
if (!cdm) {
NOTREACHED() << "MediaDrmBridge cannot be created for " << key_system;
return scoped_ptr<BrowserCdm>();
}
- // TODO(xhwang/ddorwin): Pass the security level from key system.
- MediaDrmBridge::SecurityLevel security_level =
- MediaDrmBridge::SECURITY_LEVEL_3;
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kMediaDrmEnableNonCompositing)) {
- security_level = MediaDrmBridge::SECURITY_LEVEL_1;
- }
- if (!cdm->SetSecurityLevel(security_level)) {
- DVLOG(1) << "failed to set security level " << security_level;
- return scoped_ptr<BrowserCdm>();
+ if (key_system == kWidevineKeySystem) {
+ MediaDrmBridge::SecurityLevel security_level =
+ use_hw_secure_codecs ? MediaDrmBridge::SECURITY_LEVEL_1
+ : MediaDrmBridge::SECURITY_LEVEL_3;
+ if (!cdm->SetSecurityLevel(security_level)) {
+ DVLOG(1) << "failed to set security level " << security_level;
+ return scoped_ptr<BrowserCdm>();
+ }
+ } else {
+ // Assume other key systems require hardware-secure codecs and thus do not
+ // support full compositing.
+ if (!use_hw_secure_codecs) {
+ NOTREACHED()
+ << key_system
+ << " may require use_video_overlay_for_embedded_encrypted_video";
+ return scoped_ptr<BrowserCdm>();
+ }
}
return cdm.Pass();
diff --git a/chromium/media/base/android/browser_cdm_factory_android.h b/chromium/media/base/android/browser_cdm_factory_android.h
new file mode 100644
index 00000000000..71b7970cfea
--- /dev/null
+++ b/chromium/media/base/android/browser_cdm_factory_android.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BROWSER_CDM_FACTORY_ANDROID_H_
+#define MEDIA_BASE_BROWSER_CDM_FACTORY_ANDROID_H_
+
+#include "base/macros.h"
+#include "media/base/browser_cdm_factory.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT BrowserCdmFactoryAndroid : public BrowserCdmFactory {
+ public:
+ BrowserCdmFactoryAndroid() {}
+ ~BrowserCdmFactoryAndroid() final {};
+
+ scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ const std::string& key_system,
+ bool use_hw_secure_codecs,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb) final;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BrowserCdmFactoryAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BROWSER_CDM_FACTORY_ANDROID_H_
diff --git a/chromium/media/base/android/demuxer_stream_player_params.cc b/chromium/media/base/android/demuxer_stream_player_params.cc
index e95881eb4af..5c2a11fc284 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.cc
+++ b/chromium/media/base/android/demuxer_stream_player_params.cc
@@ -11,12 +11,14 @@ DemuxerConfigs::DemuxerConfigs()
audio_channels(0),
audio_sampling_rate(0),
is_audio_encrypted(false),
+ audio_codec_delay_ns(-1),
+ audio_seek_preroll_ns(-1),
video_codec(kUnknownVideoCodec),
is_video_encrypted(false) {}
DemuxerConfigs::~DemuxerConfigs() {}
-AccessUnit::AccessUnit() : end_of_stream(false) {}
+AccessUnit::AccessUnit() : is_end_of_stream(false), is_key_frame(false) {}
AccessUnit::~AccessUnit() {}
diff --git a/chromium/media/base/android/demuxer_stream_player_params.h b/chromium/media/base/android/demuxer_stream_player_params.h
index 0b8886eb3b1..cb8ae90e8e8 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.h
+++ b/chromium/media/base/android/demuxer_stream_player_params.h
@@ -12,7 +12,7 @@
#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
#include "media/base/video_decoder_config.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -25,6 +25,8 @@ struct MEDIA_EXPORT DemuxerConfigs {
int audio_sampling_rate;
bool is_audio_encrypted;
std::vector<uint8> audio_extra_data;
+ int64 audio_codec_delay_ns;
+ int64 audio_seek_preroll_ns;
VideoCodec video_codec;
gfx::Size video_size;
@@ -39,13 +41,14 @@ struct MEDIA_EXPORT AccessUnit {
~AccessUnit();
DemuxerStream::Status status;
- bool end_of_stream;
+ bool is_end_of_stream;
// TODO(ycheo): Use the shared memory to transfer the block data.
std::vector<uint8> data;
base::TimeDelta timestamp;
std::vector<char> key_id;
std::vector<char> iv;
std::vector<media::SubsampleEntry> subsamples;
+ bool is_key_frame;
};
struct MEDIA_EXPORT DemuxerData {
diff --git a/chromium/media/base/android/media_client_android.cc b/chromium/media/base/android/media_client_android.cc
new file mode 100644
index 00000000000..ad4e11bd835
--- /dev/null
+++ b/chromium/media/base/android/media_client_android.cc
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_client_android.h"
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace media {
+
+static MediaClientAndroid* g_media_client = nullptr;
+
+void SetMediaClientAndroid(MediaClientAndroid* media_client) {
+ DCHECK(!g_media_client);
+ g_media_client = media_client;
+}
+
+MediaClientAndroid* GetMediaClientAndroid() {
+ return g_media_client;
+}
+
+MediaClientAndroid::MediaClientAndroid() {
+}
+
+MediaClientAndroid::~MediaClientAndroid() {
+}
+
+void MediaClientAndroid::AddKeySystemUUIDMappings(KeySystemUuidMap* map) {
+}
+
+media::MediaDrmBridgeDelegate* MediaClientAndroid::GetMediaDrmBridgeDelegate(
+ const std::vector<uint8_t>& scheme_uuid) {
+ return nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_client_android.h b/chromium/media/base/android/media_client_android.h
new file mode 100644
index 00000000000..0753b3b5e13
--- /dev/null
+++ b/chromium/media/base/android/media_client_android.h
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_CLIENT_ANDROID_H_
+#define MEDIA_BASE_ANDROID_MEDIA_CLIENT_ANDROID_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MediaClientAndroid;
+class MediaDrmBridgeDelegate;
+
+// Setter for MediaClientAndroid. This should be called early in embedder
+// lifecycle, before any media playback could occur.
+MEDIA_EXPORT void SetMediaClientAndroid(MediaClientAndroid* media_client);
+
+#if defined(MEDIA_IMPLEMENTATION)
+// Getter for the client. Returns nullptr if no customized client is needed.
+MediaClientAndroid* GetMediaClientAndroid();
+#endif
+
+using UUID = std::vector<uint8_t>;
+
+// A client interface for embedders (e.g. content/browser) to provide customized
+// additions to Android's browser-side media handling.
+class MEDIA_EXPORT MediaClientAndroid {
+ public:
+ typedef base::hash_map<std::string, UUID> KeySystemUuidMap;
+
+ MediaClientAndroid();
+ virtual ~MediaClientAndroid();
+
+ // Adds extra mappings from key-system name to Android UUID into |map|.
+ virtual void AddKeySystemUUIDMappings(KeySystemUuidMap* map);
+
+ // Returns a MediaDrmBridgeDelegate that corresponds to |scheme_uuid|.
+ // MediaClientAndroid retains ownership.
+ virtual media::MediaDrmBridgeDelegate* GetMediaDrmBridgeDelegate(
+ const UUID& scheme_uuid);
+
+ private:
+ friend class KeySystemManager;
+
+ base::hash_map<std::string, UUID> key_system_uuid_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaClientAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_CLIENT_ANDROID_H_
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
index 27d931e36c9..14f33002ea8 100644
--- a/chromium/media/base/android/media_codec_bridge.cc
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -4,8 +4,6 @@
#include "media/base/android/media_codec_bridge.h"
-#include <jni.h>
-#include <string>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
@@ -41,6 +39,8 @@ static const std::string AudioCodecToAndroidMimeType(const AudioCodec& codec) {
return "audio/mpeg";
case kCodecVorbis:
return "audio/vorbis";
+ case kCodecOpus:
+ return "audio/opus";
case kCodecAAC:
return "audio/mp4a-latm";
default:
@@ -73,6 +73,8 @@ static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
return "video/x-vnd.on2.vp9";
if (codec == "vorbis")
return "audio/vorbis";
+ if (codec == "opus")
+ return "audio/opus";
return std::string();
}
@@ -92,6 +94,8 @@ static const std::string AndroidMimeTypeToCodecType(const std::string& mime) {
return "mp3";
if (mime == "audio/vorbis")
return "vorbis";
+ if (mime == "audio/opus")
+ return "opus";
return std::string();
}
@@ -238,10 +242,14 @@ bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
// HW-acceleration but it doesn't. Android Media guidance is that the
// "OMX.google" prefix is always used for SW decoders, so that's what we
// use. "OMX.SEC.*" codec is Samsung software implementation - report it
- // as unaccelerated as well.
+ // as unaccelerated as well. Also temporary blacklist Exynos and MediaTek
+ // devices while HW decoder video freezes and distortions are
+ // investigated - http://crbug.com/446974.
if (codec_name.length() > 0) {
return (StartsWithASCII(codec_name, "OMX.google.", true) ||
- StartsWithASCII(codec_name, "OMX.SEC.", true));
+ StartsWithASCII(codec_name, "OMX.SEC.", true) ||
+ StartsWithASCII(codec_name, "OMX.MTK.", true) ||
+ StartsWithASCII(codec_name, "OMX.Exynos.", true));
}
return true;
}
@@ -266,8 +274,7 @@ MediaCodecBridge::~MediaCodecBridge() {
bool MediaCodecBridge::StartInternal() {
JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_start(env, j_media_codec_.obj()) &&
- GetOutputBuffers();
+ return Java_MediaCodecBridge_start(env, j_media_codec_.obj());
}
MediaCodecStatus MediaCodecBridge::Reset() {
@@ -288,6 +295,12 @@ void MediaCodecBridge::GetOutputFormat(int* width, int* height) {
*height = Java_MediaCodecBridge_getOutputHeight(env, j_media_codec_.obj());
}
+int MediaCodecBridge::GetOutputSamplingRate() {
+ JNIEnv* env = AttachCurrentThread();
+
+ return Java_MediaCodecBridge_getOutputSamplingRate(env, j_media_codec_.obj());
+}
+
MediaCodecStatus MediaCodecBridge::QueueInputBuffer(
int index,
const uint8* data,
@@ -449,11 +462,6 @@ void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
env, j_media_codec_.obj(), index, render);
}
-int MediaCodecBridge::GetInputBuffersCount() {
- JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_getInputBuffersCount(env, j_media_codec_.obj());
-}
-
int MediaCodecBridge::GetOutputBuffersCount() {
JNIEnv* env = AttachCurrentThread();
return Java_MediaCodecBridge_getOutputBuffersCount(env, j_media_codec_.obj());
@@ -465,11 +473,6 @@ size_t MediaCodecBridge::GetOutputBuffersCapacity() {
j_media_codec_.obj());
}
-bool MediaCodecBridge::GetOutputBuffers() {
- JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_getOutputBuffers(env, j_media_codec_.obj());
-}
-
void MediaCodecBridge::GetInputBuffer(int input_buffer_index,
uint8** data,
size_t* capacity) {
@@ -526,6 +529,8 @@ bool AudioCodecBridge::Start(const AudioCodec& codec,
int channel_count,
const uint8* extra_data,
size_t extra_data_size,
+ int64 codec_delay_ns,
+ int64 seek_preroll_ns,
bool play_audio,
jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
@@ -543,8 +548,10 @@ bool AudioCodecBridge::Start(const AudioCodec& codec,
env, j_mime.obj(), sample_rate, channel_count));
DCHECK(!j_format.is_null());
- if (!ConfigureMediaFormat(j_format.obj(), codec, extra_data, extra_data_size))
+ if (!ConfigureMediaFormat(j_format.obj(), codec, extra_data, extra_data_size,
+ codec_delay_ns, seek_preroll_ns)) {
return false;
+ }
if (!Java_MediaCodecBridge_configureAudio(
env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
@@ -557,8 +564,10 @@ bool AudioCodecBridge::Start(const AudioCodec& codec,
bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
const AudioCodec& codec,
const uint8* extra_data,
- size_t extra_data_size) {
- if (extra_data_size == 0)
+ size_t extra_data_size,
+ int64 codec_delay_ns,
+ int64 seek_preroll_ns) {
+ if (extra_data_size == 0 && codec != kCodecOpus)
return true;
JNIEnv* env = AttachCurrentThread();
@@ -649,6 +658,33 @@ bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
Java_MediaCodecBridge_setFrameHasADTSHeader(env, j_format);
break;
}
+ case kCodecOpus: {
+ if (!extra_data || extra_data_size == 0 ||
+ codec_delay_ns < 0 || seek_preroll_ns < 0) {
+ LOG(ERROR) << "Invalid Opus Header";
+ return false;
+ }
+
+ // csd0 - Opus Header
+ ScopedJavaLocalRef<jbyteArray> csd0 =
+ base::android::ToJavaByteArray(env, extra_data, extra_data_size);
+ Java_MediaCodecBridge_setCodecSpecificData(env, j_format, 0, csd0.obj());
+
+ // csd1 - Codec Delay
+ ScopedJavaLocalRef<jbyteArray> csd1 =
+ base::android::ToJavaByteArray(
+ env, reinterpret_cast<const uint8*>(&codec_delay_ns),
+ sizeof(int64_t));
+ Java_MediaCodecBridge_setCodecSpecificData(env, j_format, 1, csd1.obj());
+
+ // csd2 - Seek Preroll
+ ScopedJavaLocalRef<jbyteArray> csd2 =
+ base::android::ToJavaByteArray(
+ env, reinterpret_cast<const uint8*>(&seek_preroll_ns),
+ sizeof(int64_t));
+ Java_MediaCodecBridge_setCodecSpecificData(env, j_format, 2, csd2.obj());
+ break;
+ }
default:
LOG(ERROR) << "Invalid header encountered for codec: "
<< AudioCodecToAndroidMimeType(codec);
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
index f7c9e2b4c23..c5eef442e55 100644
--- a/chromium/media/base/android/media_codec_bridge.h
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -12,7 +12,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/video_decoder_config.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -29,7 +29,7 @@ enum MediaCodecStatus {
MEDIA_CODEC_INPUT_END_OF_STREAM,
MEDIA_CODEC_OUTPUT_END_OF_STREAM,
MEDIA_CODEC_NO_KEY,
- MEDIA_CODEC_STOPPED,
+ MEDIA_CODEC_ABORT,
MEDIA_CODEC_ERROR
};
@@ -107,8 +107,9 @@ class MEDIA_EXPORT MediaCodecBridge {
// returns a format change by returning INFO_OUTPUT_FORMAT_CHANGED
void GetOutputFormat(int* width, int* height);
- // Returns the number of input buffers used by the codec.
- int GetInputBuffersCount();
+ // Used for checking for new sampling rate after DequeueInputBuffer() returns
+ // INFO_OUTPUT_FORMAT_CHANGED
+ int GetOutputSamplingRate();
// Submits a byte array to the given input buffer. Call this after getting an
// available buffer from DequeueInputBuffer(). If |data| is NULL, assume the
@@ -169,16 +170,13 @@ class MEDIA_EXPORT MediaCodecBridge {
void ReleaseOutputBuffer(int index, bool render);
// Returns the number of output buffers used by the codec.
+ // TODO(qinmin): this call is deprecated in Lollipop.
int GetOutputBuffersCount();
// Returns the capacity of each output buffer used by the codec.
+ // TODO(qinmin): this call is deprecated in Lollipop.
size_t GetOutputBuffersCapacity();
- // Gets output buffers from media codec and keeps them inside the java class.
- // To access them, use DequeueOutputBuffer(). Returns whether output buffers
- // were successfully obtained.
- bool GetOutputBuffers() WARN_UNUSED_RESULT;
-
// Returns an input buffer's base pointer and capacity.
void GetInputBuffer(int input_buffer_index, uint8** data, size_t* capacity);
@@ -231,6 +229,7 @@ class AudioCodecBridge : public MediaCodecBridge {
// Start the audio codec bridge.
bool Start(const AudioCodec& codec, int sample_rate, int channel_count,
const uint8* extra_data, size_t extra_data_size,
+ int64 codec_delay_ns, int64 seek_preroll_ns,
bool play_audio, jobject media_crypto) WARN_UNUSED_RESULT;
// Play the output buffer. This call must be called after
@@ -246,7 +245,8 @@ class AudioCodecBridge : public MediaCodecBridge {
// Configure the java MediaFormat object with the extra codec data passed in.
bool ConfigureMediaFormat(jobject j_format, const AudioCodec& codec,
- const uint8* extra_data, size_t extra_data_size);
+ const uint8* extra_data, size_t extra_data_size,
+ int64 codec_delay_ns, int64 seek_preroll_ns);
};
class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
diff --git a/chromium/media/base/android/media_codec_bridge_unittest.cc b/chromium/media/base/android/media_codec_bridge_unittest.cc
index c72e6a1a20c..f1c04711d08 100644
--- a/chromium/media/base/android/media_codec_bridge_unittest.cc
+++ b/chromium/media/base/android/media_codec_bridge_unittest.cc
@@ -160,7 +160,8 @@ TEST(MediaCodecBridgeTest, DoNormal) {
scoped_ptr<media::AudioCodecBridge> media_codec;
media_codec.reset(AudioCodecBridge::Create(kCodecMP3));
- ASSERT_TRUE(media_codec->Start(kCodecMP3, 44100, 2, NULL, 0, false, NULL));
+ ASSERT_TRUE(media_codec->Start(
+ kCodecMP3, 44100, 2, NULL, 0, 0, 0, false, NULL));
int input_buf_index = -1;
MediaCodecStatus status =
@@ -205,7 +206,6 @@ TEST(MediaCodecBridgeTest, DoNormal) {
continue;
case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
- ASSERT_TRUE(media_codec->GetOutputBuffers());
continue;
default:
@@ -230,12 +230,13 @@ TEST(MediaCodecBridgeTest, InvalidVorbisHeader) {
uint8 invalid_first_byte[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
EXPECT_FALSE(media_codec->Start(
kCodecVorbis, 44100, 2, invalid_first_byte, sizeof(invalid_first_byte),
- false, NULL));
+ 0, 0, false, NULL));
// Size of the header does not match with the data we passed in.
uint8 invalid_size[] = { 0x02, 0x01, 0xff, 0x01, 0xff };
EXPECT_FALSE(media_codec->Start(
- kCodecVorbis, 44100, 2, invalid_size, sizeof(invalid_size), false, NULL));
+ kCodecVorbis, 44100, 2, invalid_size, sizeof(invalid_size),
+ 0, 0, false, NULL));
// Size of the header is too large.
size_t large_size = 8 * 1024 * 1024 + 2;
@@ -245,10 +246,33 @@ TEST(MediaCodecBridgeTest, InvalidVorbisHeader) {
very_large_header[i] = 0xff;
very_large_header[large_size - 1] = 0xfe;
EXPECT_FALSE(media_codec->Start(
- kCodecVorbis, 44100, 2, very_large_header, 0x80000000, false, NULL));
+ kCodecVorbis, 44100, 2, very_large_header, 0x80000000,
+ 0, 0, false, NULL));
delete[] very_large_header;
}
+TEST(MediaCodecBridgeTest, InvalidOpusHeader) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ scoped_ptr<media::AudioCodecBridge> media_codec;
+ media_codec.reset(AudioCodecBridge::Create(kCodecOpus));
+ uint8 dummy_extra_data[] = { 0, 0 };
+
+ // Extra Data is NULL.
+ EXPECT_FALSE(media_codec->Start(
+ kCodecOpus, 48000, 2, NULL, 0, -1, 0, false, NULL));
+
+ // Codec Delay is < 0.
+ EXPECT_FALSE(media_codec->Start(
+ kCodecOpus, 48000, 2, dummy_extra_data, sizeof(dummy_extra_data),
+ -1, 0, false, NULL));
+
+ // Seek Preroll is < 0.
+ EXPECT_FALSE(media_codec->Start(
+ kCodecOpus, 48000, 2, dummy_extra_data, sizeof(dummy_extra_data),
+ 0, -1, false, NULL));
+}
+
TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
diff --git a/chromium/media/base/android/media_codec_player.cc b/chromium/media/base/android/media_codec_player.cc
new file mode 100644
index 00000000000..d7f84733660
--- /dev/null
+++ b/chromium/media/base/android/media_codec_player.cc
@@ -0,0 +1,252 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_codec_player.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+
+#define RUN_ON_MEDIA_THREAD(METHOD, ...) \
+ do { \
+ if (!GetMediaTaskRunner()->BelongsToCurrentThread()) { \
+ GetMediaTaskRunner()->PostTask( \
+ FROM_HERE, \
+ base::Bind(&MediaCodecPlayer:: METHOD, weak_this_, ##__VA_ARGS__)); \
+ return; \
+ } \
+ } while(0)
+
+
+namespace media {
+
+class MediaThread : public base::Thread {
+ public:
+ MediaThread() : base::Thread("BrowserMediaThread") {
+ Start();
+ }
+};
+
+// Create media thread
+base::LazyInstance<MediaThread>::Leaky
+ g_media_thread = LAZY_INSTANCE_INITIALIZER;
+
+
+scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner() {
+ return g_media_thread.Pointer()->task_runner();
+}
+
+// MediaCodecPlayer implementation.
+
+MediaCodecPlayer::MediaCodecPlayer(
+ int player_id,
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url)
+ : MediaPlayerAndroid(player_id,
+ manager,
+ request_media_resources_cb,
+ frame_url),
+ ui_task_runner_(base::MessageLoopProxy::current()),
+ demuxer_(demuxer.Pass()),
+ weak_factory_(this) {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ DVLOG(1) << "MediaCodecPlayer::MediaCodecPlayer: player_id:" << player_id;
+
+ weak_this_ = weak_factory_.GetWeakPtr();
+
+ // Finish initializaton on Media thread
+ GetMediaTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&MediaCodecPlayer::Initialize, weak_this_));
+}
+
+MediaCodecPlayer::~MediaCodecPlayer()
+{
+ // Media thread
+ DVLOG(1) << "MediaCodecPlayer::~MediaCodecPlayer";
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+}
+
+void MediaCodecPlayer::Initialize() {
+ // Media thread
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ demuxer_->Initialize(this);
+}
+
+// MediaPlayerAndroid implementation.
+
+void MediaCodecPlayer::DeleteOnCorrectThread() {
+ // UI thread
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ // The listener-related portion of the base class has to be
+ // destroyed on UI thread.
+ DestroyListenerOnUIThread();
+
+ // Post deletion onto Media thread
+ GetMediaTaskRunner()->DeleteSoon(FROM_HERE, this);
+}
+
+void MediaCodecPlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
+ RUN_ON_MEDIA_THREAD(SetVideoSurface, base::Passed(&surface));
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__;
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::Start() {
+ RUN_ON_MEDIA_THREAD(Start);
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__;
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::Pause(bool is_media_related_action) {
+ RUN_ON_MEDIA_THREAD(Pause, is_media_related_action);
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__;
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::SeekTo(base::TimeDelta timestamp) {
+ RUN_ON_MEDIA_THREAD(SeekTo, timestamp);
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__ << " " << timestamp;
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::Release() {
+ RUN_ON_MEDIA_THREAD(Release);
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__;
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::SetVolume(double volume) {
+ RUN_ON_MEDIA_THREAD(SetVolume, volume);
+
+ // Media thread
+ DVLOG(1) << __FUNCTION__ << " " << volume;
+
+ NOTIMPLEMENTED();
+}
+
+int MediaCodecPlayer::GetVideoWidth() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ NOTIMPLEMENTED();
+ return 320;
+}
+
+int MediaCodecPlayer::GetVideoHeight() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ NOTIMPLEMENTED();
+ return 240;
+}
+
+base::TimeDelta MediaCodecPlayer::GetCurrentTime() {
+ // UI thread, Media thread
+ NOTIMPLEMENTED();
+ return base::TimeDelta();
+}
+
+base::TimeDelta MediaCodecPlayer::GetDuration() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ NOTIMPLEMENTED();
+ return base::TimeDelta();
+}
+
+bool MediaCodecPlayer::IsPlaying() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ return false;
+}
+
+bool MediaCodecPlayer::CanPause() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ return false;
+}
+
+bool MediaCodecPlayer::CanSeekForward() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ return false;
+}
+
+bool MediaCodecPlayer::CanSeekBackward() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ return false;
+}
+
+bool MediaCodecPlayer::IsPlayerReady() {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ return true;
+}
+
+void MediaCodecPlayer::SetCdm(BrowserCdm* cdm) {
+ // UI thread
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+}
+
+// Callbacks from Demuxer.
+
+void MediaCodecPlayer::OnDemuxerConfigsAvailable(
+ const DemuxerConfigs& configs) {
+ // Media thread
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
+ // Media thread
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::OnDemuxerSeekDone(
+ base::TimeDelta actual_browser_seek_time) {
+ // Media thread
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+}
+
+void MediaCodecPlayer::OnDemuxerDurationChanged(
+ base::TimeDelta duration) {
+ // Media thread
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_codec_player.h b/chromium/media/base/android/media_codec_player.h
new file mode 100644
index 00000000000..518adbbbd8b
--- /dev/null
+++ b/chromium/media/base/android/media_codec_player.h
@@ -0,0 +1,85 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_CODEC_PLAYER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_CODEC_PLAYER_H_
+
+#include "base/android/scoped_java_ref.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "media/base/android/demuxer_android.h"
+#include "media/base/android/media_player_android.h"
+#include "media/base/media_export.h"
+#include "ui/gl/android/scoped_java_surface.h"
+
+namespace media {
+
+class BrowserCdm;
+
+// Returns the task runner for the media thread
+MEDIA_EXPORT scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner();
+
+
+// This class implements the media player using Android's MediaCodec.
+// It differs from MediaSourcePlayer in that it removes most
+// processing away from UI thread: it uses a dedicated Media thread
+// to receive the data and to handle commands.
+class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
+ public DemuxerAndroidClient {
+ public:
+ // Constructs a player with the given ID and demuxer. |manager| must outlive
+ // the lifetime of this object.
+ MediaCodecPlayer(int player_id,
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url);
+ ~MediaCodecPlayer() override;
+
+ // MediaPlayerAndroid implementation.
+ void DeleteOnCorrectThread() override;
+ void SetVideoSurface(gfx::ScopedJavaSurface surface) override;
+ void Start() override;
+ void Pause(bool is_media_related_action) override;
+ void SeekTo(base::TimeDelta timestamp) override;
+ void Release() override;
+ void SetVolume(double volume) override;
+ int GetVideoWidth() override;
+ int GetVideoHeight() override;
+ base::TimeDelta GetCurrentTime() override;
+ base::TimeDelta GetDuration() override;
+ bool IsPlaying() override;
+ bool CanPause() override;
+ bool CanSeekForward() override;
+ bool CanSeekBackward() override;
+ bool IsPlayerReady() override;
+ void SetCdm(BrowserCdm* cdm) override;
+
+ // DemuxerAndroidClient implementation.
+ void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) override;
+ void OnDemuxerDataAvailable(const DemuxerData& params) override;
+ void OnDemuxerSeekDone(base::TimeDelta actual_browser_seek_time) override;
+ void OnDemuxerDurationChanged(base::TimeDelta duration) override;
+
+ // Helper methods
+ void Initialize();
+ void DestroySelf();
+
+ private:
+ // Object for posting tasks on UI thread.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+
+ scoped_ptr<DemuxerAndroid> demuxer_;
+
+ base::WeakPtr<MediaCodecPlayer> weak_this_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaCodecPlayer> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaCodecPlayer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_CODEC_PLAYER_H_
diff --git a/chromium/media/base/android/media_decoder_job.cc b/chromium/media/base/android/media_decoder_job.cc
index 61df5c07e19..005634928ff 100644
--- a/chromium/media/base/android/media_decoder_job.cc
+++ b/chromium/media/base/android/media_decoder_job.cc
@@ -6,9 +6,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/android/media_codec_bridge.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
@@ -25,7 +25,7 @@ MediaDecoderJob::MediaDecoderJob(
const base::Closure& request_data_cb,
const base::Closure& config_changed_cb)
: need_to_reconfig_decoder_job_(false),
- ui_task_runner_(base::MessageLoopProxy::current()),
+ ui_task_runner_(base::ThreadTaskRunnerHandle::Get()),
decoder_task_runner_(decoder_task_runner),
needs_flush_(false),
input_eos_encountered_(false),
@@ -45,7 +45,7 @@ MediaDecoderJob::MediaDecoderJob(
drm_bridge_(NULL),
drain_decoder_(false) {
InitializeReceivedData();
- eos_unit_.end_of_stream = true;
+ eos_unit_.is_end_of_stream = true;
}
MediaDecoderJob::~MediaDecoderJob() {
@@ -88,7 +88,7 @@ void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
if (stop_decode_pending_) {
DCHECK(is_decoding());
- OnDecodeCompleted(MEDIA_CODEC_STOPPED, kNoTimestamp(), kNoTimestamp());
+ OnDecodeCompleted(MEDIA_CODEC_ABORT, kNoTimestamp(), kNoTimestamp());
return;
}
@@ -110,7 +110,7 @@ void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
RequestData(prefetch_cb);
}
-bool MediaDecoderJob::Decode(
+MediaDecoderJob::MediaDecoderJobStatus MediaDecoderJob::Decode(
base::TimeTicks start_time_ticks,
base::TimeDelta start_presentation_timestamp,
const DecoderCallback& callback) {
@@ -120,10 +120,11 @@ bool MediaDecoderJob::Decode(
if (!media_codec_bridge_ || need_to_reconfig_decoder_job_) {
if (drain_decoder_)
OnDecoderDrained();
- need_to_reconfig_decoder_job_ = !CreateMediaCodecBridge();
+ MediaDecoderJobStatus status = CreateMediaCodecBridge();
+ need_to_reconfig_decoder_job_ = (status != STATUS_SUCCESS);
skip_eos_enqueue_ = true;
if (need_to_reconfig_decoder_job_)
- return false;
+ return status;
}
decode_cb_ = callback;
@@ -133,11 +134,11 @@ bool MediaDecoderJob::Decode(
base::Unretained(this),
start_time_ticks,
start_presentation_timestamp));
- return true;
+ return STATUS_SUCCESS;
}
DecodeCurrentAccessUnit(start_time_ticks, start_presentation_timestamp);
- return true;
+ return STATUS_SUCCESS;
}
void MediaDecoderJob::StopDecode() {
@@ -205,6 +206,32 @@ base::android::ScopedJavaLocalRef<jobject> MediaDecoderJob::GetMediaCrypto() {
return media_crypto;
}
+bool MediaDecoderJob::SetCurrentFrameToPreviouslyCachedKeyFrame() {
+ const std::vector<AccessUnit>& access_units =
+ received_data_[current_demuxer_data_index_].access_units;
+ // If the current data chunk is empty, the player must be in an initial or
+ // seek state. The next access unit will always be a key frame.
+ if (access_units.size() == 0)
+ return true;
+
+ // Find key frame in all the access units the decoder have decoded,
+ // or is about to decode.
+ int i = std::min(access_unit_index_[current_demuxer_data_index_],
+ access_units.size() - 1);
+ for (; i >= 0; --i) {
+ // Config change is always the last access unit, and it always come with
+ // a key frame afterwards.
+ if (access_units[i].status == DemuxerStream::kConfigChanged)
+ return true;
+ if (access_units[i].is_key_frame) {
+ access_unit_index_[current_demuxer_data_index_] = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+
void MediaDecoderJob::Release() {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
@@ -246,7 +273,7 @@ MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
// TODO(qinmin): skip frames if video is falling far behind.
DCHECK_GE(input_buf_index, 0);
- if (unit.end_of_stream || unit.data.empty()) {
+ if (unit.is_end_of_stream || unit.data.empty()) {
media_codec_bridge_->QueueEOS(input_buf_index);
return MEDIA_CODEC_INPUT_END_OF_STREAM;
}
@@ -381,13 +408,12 @@ void MediaDecoderJob::DecodeInternal(
// For aborted access unit, just skip it and inform the player.
if (unit.status == DemuxerStream::kAborted) {
- // TODO(qinmin): use a new enum instead of MEDIA_CODEC_STOPPED.
- callback.Run(MEDIA_CODEC_STOPPED, kNoTimestamp(), kNoTimestamp());
+ callback.Run(MEDIA_CODEC_ABORT, kNoTimestamp(), kNoTimestamp());
return;
}
if (skip_eos_enqueue_) {
- if (unit.end_of_stream || unit.data.empty()) {
+ if (unit.is_end_of_stream || unit.data.empty()) {
input_eos_encountered_ = true;
output_eos_encountered_ = true;
callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(),
@@ -430,12 +456,10 @@ void MediaDecoderJob::DecodeInternal(
&presentation_timestamp,
&output_eos_encountered_,
NULL);
- if (status == MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED &&
- !media_codec_bridge_->GetOutputBuffers()) {
- status = MEDIA_CODEC_ERROR;
- } else if (status == MEDIA_CODEC_OUTPUT_FORMAT_CHANGED) {
+ if (status == MEDIA_CODEC_OUTPUT_FORMAT_CHANGED) {
// TODO(qinmin): instead of waiting for the next output buffer to be
// dequeued, post a task on the UI thread to signal the format change.
+ OnOutputFormatChanged();
has_format_change = true;
}
} while (status != MEDIA_CODEC_OK && status != MEDIA_CODEC_ERROR &&
@@ -521,17 +545,14 @@ void MediaDecoderJob::OnDecodeCompleted(
case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
case MEDIA_CODEC_OUTPUT_END_OF_STREAM:
- if (!input_eos_encountered_) {
- CurrentDataConsumed(
- CurrentAccessUnit().status == DemuxerStream::kConfigChanged);
+ if (!input_eos_encountered_)
access_unit_index_[current_demuxer_data_index_]++;
- }
break;
case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
case MEDIA_CODEC_INPUT_END_OF_STREAM:
case MEDIA_CODEC_NO_KEY:
- case MEDIA_CODEC_STOPPED:
+ case MEDIA_CODEC_ABORT:
case MEDIA_CODEC_ERROR:
// Do nothing.
break;
@@ -592,9 +613,9 @@ void MediaDecoderJob::RequestCurrentChunkIfEmpty() {
// Requests new data if the the last access unit of the next chunk is not EOS.
current_demuxer_data_index_ = inactive_demuxer_data_index();
- const AccessUnit last_access_unit =
+ const AccessUnit& last_access_unit =
received_data_[current_demuxer_data_index_].access_units.back();
- if (!last_access_unit.end_of_stream &&
+ if (!last_access_unit.is_end_of_stream &&
last_access_unit.status != DemuxerStream::kAborted) {
RequestData(base::Closure());
}
@@ -619,26 +640,26 @@ void MediaDecoderJob::OnDecoderDrained() {
// Increase the access unit index so that the new decoder will not handle
// the config change again.
access_unit_index_[current_demuxer_data_index_]++;
- CurrentDataConsumed(true);
}
-bool MediaDecoderJob::CreateMediaCodecBridge() {
+MediaDecoderJob::MediaDecoderJobStatus
+ MediaDecoderJob::CreateMediaCodecBridge() {
DVLOG(1) << __FUNCTION__;
DCHECK(ui_task_runner_->BelongsToCurrentThread());
DCHECK(decode_cb_.is_null());
if (!HasStream()) {
ReleaseMediaCodecBridge();
- return false;
+ return STATUS_FAILURE;
}
// Create |media_codec_bridge_| only if config changes.
if (media_codec_bridge_ && !need_to_reconfig_decoder_job_)
- return true;
+ return STATUS_SUCCESS;
base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
if (is_content_encrypted_ && media_crypto.is_null())
- return false;
+ return STATUS_FAILURE;
ReleaseMediaCodecBridge();
DVLOG(1) << __FUNCTION__ << " : creating new media codec bridge";
@@ -653,6 +674,8 @@ bool MediaDecoderJob::IsCodecReconfigureNeeded(
return true;
}
+void MediaDecoderJob::OnOutputFormatChanged() {}
+
bool MediaDecoderJob::UpdateOutputFormat() {
return false;
}
diff --git a/chromium/media/base/android/media_decoder_job.h b/chromium/media/base/android/media_decoder_job.h
index 15d963530bb..329512f2c6b 100644
--- a/chromium/media/base/android/media_decoder_job.h
+++ b/chromium/media/base/android/media_decoder_job.h
@@ -27,6 +27,13 @@ class MediaDrmBridge;
// data request will be sent to the renderer.
class MediaDecoderJob {
public:
+ // Return value when Decode() is called.
+ enum MediaDecoderJobStatus {
+ STATUS_SUCCESS,
+ STATUS_KEY_FRAME_REQUIRED,
+ STATUS_FAILURE,
+ };
+
struct Deleter {
inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
};
@@ -57,20 +64,19 @@ class MediaDecoderJob {
// Called by MediaSourcePlayer to decode some data.
// |callback| - Run when decode operation has completed.
//
- // Returns true if the next decode was started and |callback| will be
- // called when the decode operation is complete.
- // Returns false if |media_codec_bridge_| cannot be created; |callback| is
- // ignored and will not be called.
- bool Decode(base::TimeTicks start_time_ticks,
- base::TimeDelta start_presentation_timestamp,
- const DecoderCallback& callback);
+ // Returns STATUS_SUCCESS on success, or STATUS_FAILURE on failure, or
+ // STATUS_KEY_FRAME_REQUIRED if a browser seek is required. |callback| is
+ // ignored and will not be called for the latter 2 cases.
+ MediaDecoderJobStatus Decode(base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp,
+ const DecoderCallback& callback);
// Called to stop the last Decode() early.
// If the decoder is in the process of decoding the next frame, then
// this method will just allow the decode to complete as normal. If
// this object is waiting for a data request to complete, then this method
// will wait for the data to arrive and then call the |callback|
- // passed to Decode() with a status of MEDIA_CODEC_STOPPED. This ensures that
+ // passed to Decode() with a status of MEDIA_CODEC_ABORT. This ensures that
// the |callback| passed to Decode() is always called and the status
// reflects whether data was actually decoded or the decode terminated early.
void StopDecode();
@@ -102,6 +108,9 @@ class MediaDecoderJob {
bool prerolling() const { return prerolling_; }
+ // Returns true if this object has data to decode.
+ bool HasData() const;
+
protected:
// Creates a new MediaDecoderJob instance.
// |decoder_task_runner| - Thread on which the decoder task will run.
@@ -132,6 +141,11 @@ class MediaDecoderJob {
// Releases the |media_codec_bridge_|.
void ReleaseMediaCodecBridge();
+ // Sets the current frame to a previously cached key frame. Returns true if
+ // a key frame is found, or false otherwise.
+ // TODO(qinmin): add UMA to study the cache hit ratio for key frames.
+ bool SetCurrentFrameToPreviouslyCachedKeyFrame();
+
MediaDrmBridge* drm_bridge() { return drm_bridge_; }
void set_is_content_encrypted(bool is_content_encrypted) {
@@ -151,9 +165,6 @@ class MediaDecoderJob {
// Queues an access unit into |media_codec_bridge_|'s input buffer.
MediaCodecStatus QueueInputBuffer(const AccessUnit& unit);
- // Returns true if this object has data to decode.
- bool HasData() const;
-
// Initiates a request for more data.
// |done_cb| is called when more data is available in |received_data_|.
void RequestData(const base::Closure& done_cb);
@@ -209,18 +220,16 @@ class MediaDecoderJob {
// Called when the decoder is completely drained and is ready to be released.
void OnDecoderDrained();
- // Creates |media_codec_bridge_| for decoding purpose. Returns true if it is
- // created, or false otherwise.
- bool CreateMediaCodecBridge();
-
- // Called when an access unit is consumed by the decoder. |is_config_change|
- // indicates whether the current access unit is a config change. If it is
- // true, the next access unit is guarateed to be an I-frame.
- virtual void CurrentDataConsumed(bool is_config_change) {}
+ // Creates |media_codec_bridge_| for decoding purpose.
+ // Returns STATUS_SUCCESS on success, or STATUS_FAILURE on failure, or
+ // STATUS_KEY_FRAME_REQUIRED if a browser seek is required.
+ MediaDecoderJobStatus CreateMediaCodecBridge();
// Implemented by the child class to create |media_codec_bridge_| for a
- // particular stream. Returns true if it is created, or false otherwise.
- virtual bool CreateMediaCodecBridgeInternal() = 0;
+ // particular stream.
+ // Returns STATUS_SUCCESS on success, or STATUS_FAILURE on failure, or
+ // STATUS_KEY_FRAME_REQUIRED if a browser seek is required.
+ virtual MediaDecoderJobStatus CreateMediaCodecBridgeInternal() = 0;
// Returns true if the |configs| doesn't match the current demuxer configs
// the decoder job has.
@@ -231,6 +240,11 @@ class MediaDecoderJob {
// new DemuxerConfigs, or false otherwise.
virtual bool IsCodecReconfigureNeeded(const DemuxerConfigs& configs) const;
+ // Signals to decoder job that decoder has updated output format. Decoder job
+ // may need to do internal reconfiguration in order to correctly interpret
+ // incoming buffers
+ virtual void OnOutputFormatChanged();
+
// Update the output format from the decoder, returns true if the output
// format changes, or false otherwise.
virtual bool UpdateOutputFormat();
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index 3cb5bace517..fcb446b28b2 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -14,10 +14,16 @@
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/sys_byteorder.h"
+#include "base/sys_info.h"
+#include "base/thread_task_runner_handle.h"
#include "jni/MediaDrmBridge_jni.h"
+#include "media/base/android/media_client_android.h"
+#include "media/base/android/media_drm_bridge_delegate.h"
+#include "media/base/cdm_key_information.h"
#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
@@ -29,85 +35,71 @@ using base::android::ScopedJavaLocalRef;
namespace media {
-static uint32 ReadUint32(const uint8_t* data) {
- uint32 value = 0;
- for (int i = 0; i < 4; ++i)
- value = (value << 8) | data[i];
- return value;
-}
-
-static uint64 ReadUint64(const uint8_t* data) {
- uint64 value = 0;
- for (int i = 0; i < 8; ++i)
- value = (value << 8) | data[i];
- return value;
-}
-
-// The structure of an ISO CENC Protection System Specific Header (PSSH) box is
-// as follows. (See ISO/IEC FDIS 23001-7:2011(E).)
-// Note: ISO boxes use big-endian values.
-//
-// PSSH {
-// uint32 Size
-// uint32 Type
-// uint64 LargeSize # Field is only present if value(Size) == 1.
-// uint32 VersionAndFlags
-// uint8[16] SystemId
-// uint32 DataSize
-// uint8[DataSize] Data
-// }
-const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
-const int kBoxLargeSizeSize = 8;
-const int kPsshVersionFlagSize = 4;
-const int kPsshSystemIdSize = 16;
-const int kPsshDataSizeSize = 4;
-const uint32 kTencType = 0x74656e63;
-const uint32 kPsshType = 0x70737368;
+namespace {
+
+// DrmBridge supports session expiration event but doesn't provide detailed
+// status for each key ID, which is required by the EME spec. Use a dummy key ID
+// here to report session expiration info.
+const char kDummyKeyId[] = "Dummy Key Id";
+
+// Returns string session ID from jbyteArray (byte[] in Java).
+std::string GetSessionId(JNIEnv* env, jbyteArray j_session_id) {
+ std::vector<uint8> session_id_vector;
+ JavaByteArrayToByteVector(env, j_session_id, &session_id_vector);
+ return std::string(session_id_vector.begin(), session_id_vector.end());
+}
+
const uint8 kWidevineUuid[16] = {
0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
-typedef std::vector<uint8> UUID;
+// Convert |init_data_type| to a string supported by MediaDRM.
+// "audio"/"video" does not matter, so use "video".
+std::string ConvertInitDataType(media::EmeInitDataType init_data_type) {
+ // TODO(jrummell): API level >=20 supports "webm" and "cenc", so switch
+ // to those strings.
+ switch (init_data_type) {
+ case media::EmeInitDataType::WEBM:
+ return "video/webm";
+ case media::EmeInitDataType::CENC:
+ return "video/mp4";
+ default:
+ NOTREACHED();
+ return "video/unknown";
+ }
+}
-class KeySystemUuidManager {
+class KeySystemManager {
public:
- KeySystemUuidManager();
+ KeySystemManager();
UUID GetUUID(const std::string& key_system);
- void AddMapping(const std::string& key_system, const UUID& uuid);
std::vector<std::string> GetPlatformKeySystemNames();
private:
- typedef base::hash_map<std::string, UUID> KeySystemUuidMap;
+ using KeySystemUuidMap = MediaClientAndroid::KeySystemUuidMap;
KeySystemUuidMap key_system_uuid_map_;
- DISALLOW_COPY_AND_ASSIGN(KeySystemUuidManager);
+ DISALLOW_COPY_AND_ASSIGN(KeySystemManager);
};
-KeySystemUuidManager::KeySystemUuidManager() {
+KeySystemManager::KeySystemManager() {
// Widevine is always supported in Android.
key_system_uuid_map_[kWidevineKeySystem] =
UUID(kWidevineUuid, kWidevineUuid + arraysize(kWidevineUuid));
+ MediaClientAndroid* client = GetMediaClientAndroid();
+ if (client)
+ client->AddKeySystemUUIDMappings(&key_system_uuid_map_);
}
-UUID KeySystemUuidManager::GetUUID(const std::string& key_system) {
+UUID KeySystemManager::GetUUID(const std::string& key_system) {
KeySystemUuidMap::iterator it = key_system_uuid_map_.find(key_system);
if (it == key_system_uuid_map_.end())
return UUID();
return it->second;
}
-void KeySystemUuidManager::AddMapping(const std::string& key_system,
- const UUID& uuid) {
- KeySystemUuidMap::iterator it = key_system_uuid_map_.find(key_system);
- DCHECK(it == key_system_uuid_map_.end())
- << "Shouldn't overwrite an existing key system.";
- if (it != key_system_uuid_map_.end())
- return;
- key_system_uuid_map_[key_system] = uuid;
-}
-
-std::vector<std::string> KeySystemUuidManager::GetPlatformKeySystemNames() {
+std::vector<std::string> KeySystemManager::GetPlatformKeySystemNames() {
std::vector<std::string> key_systems;
for (KeySystemUuidMap::iterator it = key_system_uuid_map_.begin();
it != key_system_uuid_map_.end(); ++it) {
@@ -118,94 +110,33 @@ std::vector<std::string> KeySystemUuidManager::GetPlatformKeySystemNames() {
return key_systems;
}
-base::LazyInstance<KeySystemUuidManager>::Leaky g_key_system_uuid_manager =
+base::LazyInstance<KeySystemManager>::Leaky g_key_system_manager =
LAZY_INSTANCE_INITIALIZER;
-// Tries to find a PSSH box whose "SystemId" is |uuid| in |data|, parses the
-// "Data" of the box and put it in |pssh_data|. Returns true if such a box is
-// found and successfully parsed. Returns false otherwise.
-// Notes:
-// 1, If multiple PSSH boxes are found,the "Data" of the first matching PSSH box
-// will be set in |pssh_data|.
-// 2, Only PSSH and TENC boxes are allowed in |data|. TENC boxes are skipped.
-static bool GetPsshData(const uint8* data, int data_size,
- const UUID& uuid,
- std::vector<uint8>* pssh_data) {
- const uint8* cur = data;
- const uint8* data_end = data + data_size;
- int bytes_left = data_size;
-
- while (bytes_left > 0) {
- const uint8* box_head = cur;
-
- if (bytes_left < kBoxHeaderSize)
- return false;
-
- uint64_t box_size = ReadUint32(cur);
- uint32 type = ReadUint32(cur + 4);
- cur += kBoxHeaderSize;
- bytes_left -= kBoxHeaderSize;
-
- if (box_size == 1) { // LargeSize is present.
- if (bytes_left < kBoxLargeSizeSize)
- return false;
-
- box_size = ReadUint64(cur);
- cur += kBoxLargeSizeSize;
- bytes_left -= kBoxLargeSizeSize;
- } else if (box_size == 0) {
- box_size = bytes_left + kBoxHeaderSize;
- }
-
- const uint8* box_end = box_head + box_size;
- if (data_end < box_end)
- return false;
-
- if (type == kTencType) {
- // Skip 'tenc' box.
- cur = box_end;
- bytes_left = data_end - cur;
- continue;
- } else if (type != kPsshType) {
- return false;
- }
-
- const int kPsshBoxMinimumSize =
- kPsshVersionFlagSize + kPsshSystemIdSize + kPsshDataSizeSize;
- if (box_end < cur + kPsshBoxMinimumSize)
- return false;
-
- uint32 version_and_flags = ReadUint32(cur);
- cur += kPsshVersionFlagSize;
- bytes_left -= kPsshVersionFlagSize;
- if (version_and_flags != 0)
- return false;
-
- DCHECK_GE(bytes_left, kPsshSystemIdSize);
- if (!std::equal(uuid.begin(), uuid.end(), cur)) {
- cur = box_end;
- bytes_left = data_end - cur;
- continue;
- }
-
- cur += kPsshSystemIdSize;
- bytes_left -= kPsshSystemIdSize;
-
- uint32 data_size = ReadUint32(cur);
- cur += kPsshDataSizeSize;
- bytes_left -= kPsshDataSizeSize;
-
- if (box_end < cur + data_size)
- return false;
+// Checks whether |key_system| is supported with |container_mime_type|. Only
+// checks |key_system| support if |container_mime_type| is empty.
+// TODO(xhwang): The |container_mime_type| is not the same as contentType in
+// the EME spec. Revisit this once the spec issue with initData type is
+// resolved.
+bool IsKeySystemSupportedWithTypeImpl(const std::string& key_system,
+ const std::string& container_mime_type) {
+ if (!MediaDrmBridge::IsAvailable())
+ return false;
- pssh_data->assign(cur, cur + data_size);
- return true;
- }
+ UUID scheme_uuid = g_key_system_manager.Get().GetUUID(key_system);
+ if (scheme_uuid.empty())
+ return false;
- return false;
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_container_mime_type =
+ ConvertUTF8ToJavaString(env, container_mime_type);
+ return Java_MediaDrmBridge_isCryptoSchemeSupported(
+ env, j_scheme_uuid.obj(), j_container_mime_type.obj());
}
-static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
+MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
const std::string& security_level_str) {
if (0 == security_level_str.compare("L1"))
return MediaDrmBridge::SECURITY_LEVEL_1;
@@ -215,7 +146,7 @@ static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
return MediaDrmBridge::SECURITY_LEVEL_NONE;
}
-static std::string GetSecurityLevelString(
+std::string GetSecurityLevelString(
MediaDrmBridge::SecurityLevel security_level) {
switch (security_level) {
case MediaDrmBridge::SECURITY_LEVEL_NONE:
@@ -228,35 +159,26 @@ static std::string GetSecurityLevelString(
return "";
}
-// Checks whether |key_system| is supported with |container_mime_type|. Only
-// checks |key_system| support if |container_mime_type| is empty.
-// TODO(xhwang): The |container_mime_type| is not the same as contentType in
-// the EME spec. Revisit this once the spec issue with initData type is
-// resolved.
-static bool IsKeySystemSupportedWithTypeImpl(
- const std::string& key_system,
- const std::string& container_mime_type) {
- if (!MediaDrmBridge::IsAvailable())
- return false;
+} // namespace
- UUID scheme_uuid = g_key_system_uuid_manager.Get().GetUUID(key_system);
- if (scheme_uuid.empty())
+// static
+bool MediaDrmBridge::IsAvailable() {
+ if (base::android::BuildInfo::GetInstance()->sdk_int() < 19)
return false;
- JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
- base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
- ScopedJavaLocalRef<jstring> j_container_mime_type =
- ConvertUTF8ToJavaString(env, container_mime_type);
- return Java_MediaDrmBridge_isCryptoSchemeSupported(
- env, j_scheme_uuid.obj(), j_container_mime_type.obj());
-}
+ int32 os_major_version = 0;
+ int32 os_minor_version = 0;
+ int32 os_bugfix_version = 0;
+ base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
+ if (os_major_version == 4 && os_minor_version == 4 && os_bugfix_version == 0)
+ return false;
-// static
-bool MediaDrmBridge::IsAvailable() {
- return base::android::BuildInfo::GetInstance()->sdk_int() >= 19;
+ return true;
}
+// TODO(ddorwin): This is specific to Widevine. http://crbug.com/459400
// static
bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
DCHECK(IsAvailable());
@@ -264,31 +186,8 @@ bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
}
// static
-bool MediaDrmBridge::IsSecurityLevelSupported(const std::string& key_system,
- SecurityLevel security_level) {
- if (!IsAvailable())
- return false;
-
- scoped_ptr<MediaDrmBridge> media_drm_bridge =
- MediaDrmBridge::CreateSessionless(key_system);
- if (!media_drm_bridge)
- return false;
-
- return media_drm_bridge->SetSecurityLevel(security_level);
-}
-
-static void AddKeySystemUuidMapping(JNIEnv* env, jclass clazz,
- jstring j_key_system,
- jobject j_buffer) {
- std::string key_system = ConvertJavaStringToUTF8(env, j_key_system);
- uint8* buffer = static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer));
- UUID uuid(buffer, buffer + 16);
- g_key_system_uuid_manager.Get().AddMapping(key_system, uuid);
-}
-
-// static
std::vector<std::string> MediaDrmBridge::GetPlatformKeySystemNames() {
- return g_key_system_uuid_manager.Get().GetPlatformKeySystemNames();
+ return g_key_system_manager.Get().GetPlatformKeySystemNames();
}
// static
@@ -309,18 +208,17 @@ bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
-MediaDrmBridge::MediaDrmBridge(const std::vector<uint8>& scheme_uuid,
- const SessionCreatedCB& session_created_cb,
- const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
- const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb)
+MediaDrmBridge::MediaDrmBridge(
+ const std::vector<uint8>& scheme_uuid,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb)
: scheme_uuid_(scheme_uuid),
- session_created_cb_(session_created_cb),
session_message_cb_(session_message_cb),
- session_ready_cb_(session_ready_cb),
session_closed_cb_(session_closed_cb),
- session_error_cb_(session_error_cb) {
+ legacy_session_error_cb_(legacy_session_error_cb),
+ session_keys_change_cb_(session_keys_change_cb) {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
@@ -334,31 +232,29 @@ MediaDrmBridge::~MediaDrmBridge() {
JNIEnv* env = AttachCurrentThread();
player_tracker_.NotifyCdmUnset();
if (!j_media_drm_.is_null())
- Java_MediaDrmBridge_release(env, j_media_drm_.obj());
+ Java_MediaDrmBridge_destroy(env, j_media_drm_.obj());
}
// static
+// TODO(xhwang): Enable SessionExpirationUpdateCB when it is supported.
scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
const std::string& key_system,
- const SessionCreatedCB& session_created_cb,
const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb) {
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& /* session_expiration_update_cb */) {
scoped_ptr<MediaDrmBridge> media_drm_bridge;
if (!IsAvailable())
return media_drm_bridge.Pass();
- UUID scheme_uuid = g_key_system_uuid_manager.Get().GetUUID(key_system);
+ UUID scheme_uuid = g_key_system_manager.Get().GetUUID(key_system);
if (scheme_uuid.empty())
return media_drm_bridge.Pass();
- media_drm_bridge.reset(new MediaDrmBridge(scheme_uuid,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb));
+ media_drm_bridge.reset(
+ new MediaDrmBridge(scheme_uuid, session_message_cb, session_closed_cb,
+ legacy_session_error_cb, session_keys_change_cb));
if (media_drm_bridge->j_media_drm_.is_null())
media_drm_bridge.reset();
@@ -367,17 +263,21 @@ scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
}
// static
-scoped_ptr<MediaDrmBridge> MediaDrmBridge::CreateSessionless(
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::CreateWithoutSessionSupport(
const std::string& key_system) {
- return MediaDrmBridge::Create(key_system,
- SessionCreatedCB(),
- SessionMessageCB(),
- SessionReadyCB(),
- SessionClosedCB(),
- SessionErrorCB());
+ return MediaDrmBridge::Create(
+ key_system, SessionMessageCB(), SessionClosedCB(), LegacySessionErrorCB(),
+ SessionKeysChangeCB(), SessionExpirationUpdateCB());
}
bool MediaDrmBridge::SetSecurityLevel(SecurityLevel security_level) {
+ if (security_level != SECURITY_LEVEL_NONE &&
+ !std::equal(scheme_uuid_.begin(), scheme_uuid_.end(), kWidevineUuid)) {
+ NOTREACHED() << "Widevine security level " << security_level
+ << "used with another key system";
+ return false;
+ }
+
JNIEnv* env = AttachCurrentThread();
std::string security_level_str = GetSecurityLevelString(security_level);
@@ -390,70 +290,113 @@ bool MediaDrmBridge::SetSecurityLevel(SecurityLevel security_level) {
env, j_media_drm_.obj(), j_security_level.obj());
}
-bool MediaDrmBridge::CreateSession(uint32 session_id,
- const std::string& content_type,
- const uint8* init_data,
- int init_data_length) {
+void MediaDrmBridge::SetServerCertificate(
+ const std::vector<uint8_t>& certificate,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0,
+ "SetServerCertificate() is not supported.");
+}
+
+void MediaDrmBridge::CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ media::EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
DVLOG(1) << __FUNCTION__;
- DCHECK(!session_created_cb_.is_null())
- << "CreateSession called on a sessionless MediaDrmBridge object.";
+ if (session_type != media::MediaKeys::TEMPORARY_SESSION) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0,
+ "Only the temporary session type is supported.");
+ return;
+ }
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_init_data;
- // Caller should always use "video/*" content types.
- DCHECK_EQ(0u, content_type.find("video/"));
-
- // Widevine MediaDrm plugin only accepts the "data" part of the PSSH box as
- // the init data when using MP4 container.
- if (std::equal(scheme_uuid_.begin(), scheme_uuid_.end(), kWidevineUuid) &&
- content_type == "video/mp4") {
- std::vector<uint8> pssh_data;
- if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
- return false;
- j_init_data =
- base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
- } else {
- j_init_data =
- base::android::ToJavaByteArray(env, init_data, init_data_length);
+ ScopedJavaLocalRef<jobjectArray> j_optional_parameters;
+
+ MediaClientAndroid* client = GetMediaClientAndroid();
+ if (client) {
+ MediaDrmBridgeDelegate* delegate =
+ client->GetMediaDrmBridgeDelegate(scheme_uuid_);
+ if (delegate) {
+ std::vector<uint8> init_data_from_delegate;
+ std::vector<std::string> optional_parameters_from_delegate;
+ if (!delegate->OnCreateSession(init_data_type, init_data,
+ &init_data_from_delegate,
+ &optional_parameters_from_delegate)) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Invalid init data.");
+ }
+ if (!init_data_from_delegate.empty()) {
+ j_init_data = base::android::ToJavaByteArray(
+ env, vector_as_array(&init_data_from_delegate),
+ init_data_from_delegate.size());
+ }
+ if (!optional_parameters_from_delegate.empty()) {
+ j_optional_parameters = base::android::ToJavaArrayOfStrings(
+ env, optional_parameters_from_delegate);
+ }
+ }
+ }
+
+ if (j_init_data.is_null()) {
+ j_init_data = base::android::ToJavaByteArray(
+ env, vector_as_array(&init_data), init_data.size());
}
ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, content_type);
- Java_MediaDrmBridge_createSession(
- env, j_media_drm_.obj(), session_id, j_init_data.obj(), j_mime.obj());
- return true;
+ ConvertUTF8ToJavaString(env, ConvertInitDataType(init_data_type));
+ uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ Java_MediaDrmBridge_createSessionFromNative(env, j_media_drm_.obj(),
+ j_init_data.obj(), j_mime.obj(),
+ j_optional_parameters.obj(),
+ promise_id);
}
-void MediaDrmBridge::LoadSession(uint32 session_id,
- const std::string& web_session_id) {
- // MediaDrmBridge doesn't support loading sessions.
- NOTREACHED();
+void MediaDrmBridge::LoadSession(
+ SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0, "LoadSession() is not supported.");
}
-void MediaDrmBridge::UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) {
+void MediaDrmBridge::UpdateSession(
+ const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
DVLOG(1) << __FUNCTION__;
- DCHECK(!session_ready_cb_.is_null())
- << __FUNCTION__ << " called on a sessionless MediaDrmBridge object.";
-
JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jbyteArray> j_response =
- base::android::ToJavaByteArray(env, response, response_length);
- Java_MediaDrmBridge_updateSession(
- env, j_media_drm_.obj(), session_id, j_response.obj());
-}
-
-void MediaDrmBridge::ReleaseSession(uint32 session_id) {
+ ScopedJavaLocalRef<jbyteArray> j_response = base::android::ToJavaByteArray(
+ env, vector_as_array(&response), response.size());
+ ScopedJavaLocalRef<jbyteArray> j_session_id = base::android::ToJavaByteArray(
+ env, reinterpret_cast<const uint8_t*>(session_id.data()),
+ session_id.size());
+ uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ Java_MediaDrmBridge_updateSession(env, j_media_drm_.obj(), j_session_id.obj(),
+ j_response.obj(), promise_id);
+}
+
+void MediaDrmBridge::CloseSession(const std::string& session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
DVLOG(1) << __FUNCTION__;
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_session_id = base::android::ToJavaByteArray(
+ env, reinterpret_cast<const uint8_t*>(session_id.data()),
+ session_id.size());
+ uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ Java_MediaDrmBridge_closeSession(env, j_media_drm_.obj(), j_session_id.obj(),
+ promise_id);
+}
- DCHECK(!session_closed_cb_.is_null())
- << __FUNCTION__ << " called on a sessionless MediaDrmBridge object.";
+void MediaDrmBridge::RemoveSession(
+ const std::string& session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0, "RemoveSession() is not supported.");
+}
- JNIEnv* env = AttachCurrentThread();
- Java_MediaDrmBridge_releaseSession(env, j_media_drm_.obj(), session_id);
+CdmContext* MediaDrmBridge::GetCdmContext() {
+ NOTREACHED();
+ return nullptr;
}
int MediaDrmBridge::RegisterPlayer(const base::Closure& new_key_cb,
@@ -474,7 +417,7 @@ void MediaDrmBridge::SetMediaCryptoReadyCB(const base::Closure& closure) {
DCHECK(media_crypto_ready_cb_.is_null());
if (!GetMediaCrypto().is_null()) {
- base::MessageLoopProxy::current()->PostTask(FROM_HERE, closure);
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, closure);
return;
}
@@ -487,54 +430,80 @@ void MediaDrmBridge::OnMediaCryptoReady(JNIEnv* env, jobject) {
base::ResetAndReturn(&media_crypto_ready_cb_).Run();
}
-void MediaDrmBridge::OnSessionCreated(JNIEnv* env,
- jobject j_media_drm,
- jint j_session_id,
- jstring j_web_session_id) {
- uint32 session_id = j_session_id;
- std::string web_session_id = ConvertJavaStringToUTF8(env, j_web_session_id);
- session_created_cb_.Run(session_id, web_session_id);
+void MediaDrmBridge::OnPromiseResolved(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_promise_id) {
+ cdm_promise_adapter_.ResolvePromise(j_promise_id);
+}
+
+void MediaDrmBridge::OnPromiseResolvedWithSession(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_promise_id,
+ jbyteArray j_session_id) {
+ cdm_promise_adapter_.ResolvePromise(j_promise_id,
+ GetSessionId(env, j_session_id));
+}
+
+void MediaDrmBridge::OnPromiseRejected(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_promise_id,
+ jstring j_error_message) {
+ std::string error_message = ConvertJavaStringToUTF8(env, j_error_message);
+ cdm_promise_adapter_.RejectPromise(j_promise_id, MediaKeys::UNKNOWN_ERROR, 0,
+ error_message);
}
void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
jobject j_media_drm,
- jint j_session_id,
+ jbyteArray j_session_id,
jbyteArray j_message,
- jstring j_destination_url) {
- uint32 session_id = j_session_id;
+ jstring j_legacy_destination_url) {
std::vector<uint8> message;
JavaByteArrayToByteVector(env, j_message, &message);
- GURL destination_gurl = GURL(ConvertJavaStringToUTF8(env, j_destination_url));
- if (!destination_gurl.is_valid() && !destination_gurl.is_empty()) {
- DLOG(WARNING) << "SessionMessage destination_url is invalid : "
- << destination_gurl.possibly_invalid_spec();
- destination_gurl = GURL::EmptyGURL(); // Replace invalid destination_url.
- }
- session_message_cb_.Run(session_id, message, destination_gurl);
-}
+ GURL legacy_destination_url =
+ GURL(ConvertJavaStringToUTF8(env, j_legacy_destination_url));
+ // Note: Message type is not supported in MediaDrm. Do our best guess here.
+ media::MediaKeys::MessageType message_type =
+ legacy_destination_url.is_empty() ? media::MediaKeys::LICENSE_REQUEST
+ : media::MediaKeys::LICENSE_RENEWAL;
-void MediaDrmBridge::OnSessionReady(JNIEnv* env,
- jobject j_media_drm,
- jint j_session_id) {
- uint32 session_id = j_session_id;
- session_ready_cb_.Run(session_id);
- // TODO(xhwang/jrummell): Move this when usableKeyIds/keyschange are
- // implemented.
- player_tracker_.NotifyNewKey();
+ session_message_cb_.Run(GetSessionId(env, j_session_id), message_type,
+ message, legacy_destination_url);
}
void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
jobject j_media_drm,
- jint j_session_id) {
- uint32 session_id = j_session_id;
- session_closed_cb_.Run(session_id);
+ jbyteArray j_session_id) {
+ session_closed_cb_.Run(GetSessionId(env, j_session_id));
}
-void MediaDrmBridge::OnSessionError(JNIEnv* env,
- jobject j_media_drm,
- jint j_session_id) {
- uint32 session_id = j_session_id;
- session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+void MediaDrmBridge::OnSessionKeysChange(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ bool has_additional_usable_key,
+ jint j_key_status) {
+ if (has_additional_usable_key)
+ player_tracker_.NotifyNewKey();
+
+ scoped_ptr<CdmKeyInformation> cdm_key_information(new CdmKeyInformation());
+ cdm_key_information->key_id.assign(kDummyKeyId,
+ kDummyKeyId + sizeof(kDummyKeyId));
+ cdm_key_information->status =
+ static_cast<CdmKeyInformation::KeyStatus>(j_key_status);
+ CdmKeysInfo cdm_keys_info;
+ cdm_keys_info.push_back(cdm_key_information.release());
+
+ session_keys_change_cb_.Run(GetSessionId(env, j_session_id),
+ has_additional_usable_key, cdm_keys_info.Pass());
+}
+
+void MediaDrmBridge::OnLegacySessionError(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ jstring j_error_message) {
+ std::string error_message = ConvertJavaStringToUTF8(env, j_error_message);
+ legacy_session_error_cb_.Run(GetSessionId(env, j_session_id),
+ MediaKeys::UNKNOWN_ERROR, 0, error_message);
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
@@ -552,7 +521,12 @@ MediaDrmBridge::SecurityLevel MediaDrmBridge::GetSecurityLevel() {
}
bool MediaDrmBridge::IsProtectedSurfaceRequired() {
- return IsSecureDecoderRequired(GetSecurityLevel());
+ // For Widevine, this depends on the security level.
+ if (std::equal(scheme_uuid_.begin(), scheme_uuid_.end(), kWidevineUuid))
+ return IsSecureDecoderRequired(GetSecurityLevel());
+
+ // For other key systems, assume true.
+ return true;
}
void MediaDrmBridge::ResetDeviceCredentials(
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 20da617a001..30842b97731 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -13,6 +13,7 @@
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/browser_cdm.h"
+#include "media/base/cdm_promise_adapter.h"
#include "media/base/media_export.h"
#include "media/cdm/player_tracker_impl.h"
#include "url/gurl.h"
@@ -24,9 +25,9 @@ namespace media {
class MediaPlayerManager;
// This class provides DRM services for android EME implementation.
-// TODO(qinmin): implement all the functions in this class.
class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
public:
+ // TODO(ddorwin): These are specific to Widevine. http://crbug.com/459400
enum SecurityLevel {
SECURITY_LEVEL_NONE = 0,
SECURITY_LEVEL_1 = 1,
@@ -35,16 +36,13 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
typedef base::Callback<void(bool)> ResetCredentialsCB;
- virtual ~MediaDrmBridge();
+ ~MediaDrmBridge() override;
// Checks whether MediaDRM is available.
// All other static methods check IsAvailable() internally. There's no need
// to check IsAvailable() explicitly before calling them.
static bool IsAvailable();
- static bool IsSecurityLevelSupported(const std::string& key_system,
- SecurityLevel security_level);
-
// Checks whether |key_system| is supported.
static bool IsKeySystemSupported(const std::string& key_system);
@@ -58,24 +56,23 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
const std::string& key_system,
const std::string& container_mime_type);
- static bool IsSecureDecoderRequired(SecurityLevel security_level);
-
static bool RegisterMediaDrmBridge(JNIEnv* env);
// Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
// pointer otherwise.
+ // TODO(xhwang): Is it okay not to update session expiration info?
static scoped_ptr<MediaDrmBridge> Create(
const std::string& key_system,
- const SessionCreatedCB& session_created_cb,
const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb);
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb);
// Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
// otherwise. No session callbacks are provided. This is used when we need to
// use MediaDrmBridge without creating any sessions.
- static scoped_ptr<MediaDrmBridge> CreateSessionless(
+ static scoped_ptr<MediaDrmBridge> CreateWithoutSessionSupport(
const std::string& key_system);
// Returns true if |security_level| is successfully set, or false otherwise.
@@ -87,20 +84,31 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// object. Calling this function multiples times may cause errors.
bool SetSecurityLevel(SecurityLevel security_level);
- // BrowserCdm implementations.
- virtual bool CreateSession(uint32 session_id,
- const std::string& content_type,
- const uint8* init_data,
- int init_data_length) override;
- virtual void LoadSession(uint32 session_id,
- const std::string& web_session_id) override;
- virtual void UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) override;
- virtual void ReleaseSession(uint32 session_id) override;
- virtual int RegisterPlayer(const base::Closure& new_key_cb,
- const base::Closure& cdm_unset_cb) override;
- virtual void UnregisterPlayer(int registration_id) override;
+ // MediaKeys (via BrowserCdm) implementation.
+ void SetServerCertificate(
+ const std::vector<uint8_t>& certificate,
+ scoped_ptr<media::SimpleCdmPromise> promise) override;
+ void CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ media::EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<media::NewSessionCdmPromise> promise) override;
+ void LoadSession(SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) override;
+ void UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<media::SimpleCdmPromise> promise) override;
+ void CloseSession(const std::string& session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) override;
+ void RemoveSession(const std::string& session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) override;
+ CdmContext* GetCdmContext() override;
+
+ // PlayerTracker (via BrowserCdm) implementation.
+ int RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) override;
+ void UnregisterPlayer(int registration_id) override;
// Returns a MediaCrypto object if it's already created. Returns a null object
// otherwise.
@@ -113,19 +121,51 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// Called after a MediaCrypto object is created.
void OnMediaCryptoReady(JNIEnv* env, jobject j_media_drm);
- // Callbacks for firing session events.
- void OnSessionCreated(JNIEnv* env,
- jobject j_media_drm,
- jint j_session_id,
- jstring j_web_session_id);
+ // Callbacks to resolve the promise for |promise_id|.
+ void OnPromiseResolved(JNIEnv* env, jobject j_media_drm, jint j_promise_id);
+ void OnPromiseResolvedWithSession(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_promise_id,
+ jbyteArray j_session_id);
+
+ // Callback to reject the promise for |promise_id| with |error_message|.
+ // Note: No |system_error| is available from MediaDrm.
+ // TODO(xhwang): Implement Exception code.
+ void OnPromiseRejected(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_promise_id,
+ jstring j_error_message);
+
+ // Session event callbacks.
+ // Note: Session expiration update is not supported by MediaDrm.
+
void OnSessionMessage(JNIEnv* env,
jobject j_media_drm,
- jint j_session_id,
+ jbyteArray j_session_id,
jbyteArray j_message,
- jstring j_destination_url);
- void OnSessionReady(JNIEnv* env, jobject j_media_drm, jint j_session_id);
- void OnSessionClosed(JNIEnv* env, jobject j_media_drm, jint j_session_id);
- void OnSessionError(JNIEnv* env, jobject j_media_drm, jint j_session_id);
+ jstring j_legacy_destination_url);
+ void OnSessionClosed(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id);
+
+ // Note: Key ID is not available in MediaDrm, so only a generic |j_key_status|
+ // and |has_additional_usable_key| are returned.
+ void OnSessionKeysChange(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ bool has_additional_usable_key,
+ jint j_key_status);
+
+ // Called by the CDM when an error occurred in session |j_session_id|
+ // unrelated to one of the MediaKeys calls that accept a |promise|.
+ // Note:
+ // - This method is only for supporting prefixed EME API.
+ // - This method will be ignored by unprefixed EME. All errors reported
+ // in this method should probably also be reported by one of other methods.
+ void OnLegacySessionError(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ jstring j_error_message);
// Reset the device credentials.
void ResetDeviceCredentials(const ResetCredentialsCB& callback);
@@ -139,11 +179,12 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
private:
MediaDrmBridge(const std::vector<uint8>& scheme_uuid,
- const SessionCreatedCB& session_created_cb,
const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb);
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb);
+
+ static bool IsSecureDecoderRequired(SecurityLevel security_level);
// Get the security level of the media.
SecurityLevel GetSecurityLevel();
@@ -155,11 +196,10 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
base::android::ScopedJavaGlobalRef<jobject> j_media_drm_;
// Callbacks for firing session events.
- SessionCreatedCB session_created_cb_;
SessionMessageCB session_message_cb_;
- SessionReadyCB session_ready_cb_;
SessionClosedCB session_closed_cb_;
- SessionErrorCB session_error_cb_;
+ LegacySessionErrorCB legacy_session_error_cb_;
+ SessionKeysChangeCB session_keys_change_cb_;
base::Closure media_crypto_ready_cb_;
@@ -167,6 +207,8 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
PlayerTrackerImpl player_tracker_;
+ CdmPromiseAdapter cdm_promise_adapter_;
+
DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
};
diff --git a/chromium/media/base/android/media_drm_bridge_delegate.cc b/chromium/media/base/android/media_drm_bridge_delegate.cc
new file mode 100644
index 00000000000..5630d4af0da
--- /dev/null
+++ b/chromium/media/base/android/media_drm_bridge_delegate.cc
@@ -0,0 +1,27 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_drm_bridge_delegate.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+MediaDrmBridgeDelegate::MediaDrmBridgeDelegate() {
+}
+
+MediaDrmBridgeDelegate::~MediaDrmBridgeDelegate() {
+}
+
+bool MediaDrmBridgeDelegate::OnCreateSession(
+ const EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ std::vector<uint8_t>* init_data_out,
+ std::vector<std::string>* optional_parameters_out) {
+ DCHECK(init_data_out->empty());
+ DCHECK(optional_parameters_out->empty());
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_drm_bridge_delegate.h b/chromium/media/base/android/media_drm_bridge_delegate.h
new file mode 100644
index 00000000000..7a18dace989
--- /dev/null
+++ b/chromium/media/base/android/media_drm_bridge_delegate.h
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_DELEGATE_H_
+#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_DELEGATE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "media/base/android/media_client_android.h"
+#include "media/base/eme_constants.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Allows embedders to modify the Android MediaDrm flow. Delegates are
+// registered to a specific key system.
+class MEDIA_EXPORT MediaDrmBridgeDelegate {
+ public:
+ MediaDrmBridgeDelegate();
+ virtual ~MediaDrmBridgeDelegate();
+
+ // Returns the UUID of the DRM scheme that this delegate applies to.
+ virtual const UUID GetUUID() const = 0;
+
+ // Invoked from CreateSession.
+ // If |init_data_out| is filled, it replaces |init_data| to send to the
+ // MediaDrm instance.
+ // If |optional_parameters_out| is filled, it is expected to be an
+ // even-length list of (key, value) pairs to send to the MediaDrm instance.
+ // Returns false if the request should be rejected.
+ virtual bool OnCreateSession(
+ const EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ std::vector<uint8_t>* init_data_out,
+ std::vector<std::string>* optional_parameters_out);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MediaDrmBridgeDelegate);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_DELEGATE_H_
diff --git a/chromium/media/base/android/media_drm_bridge_unittest.cc b/chromium/media/base/android/media_drm_bridge_unittest.cc
index 1a4c01426ae..e4c0800030a 100644
--- a/chromium/media/base/android/media_drm_bridge_unittest.cc
+++ b/chromium/media/base/android/media_drm_bridge_unittest.cc
@@ -12,14 +12,14 @@
namespace media {
-#define EXPECT_TRUE_IF_AVAILABLE(a) \
- do { \
- if (!MediaDrmBridge::IsAvailable()) { \
- VLOG(0) << "MediaDrm not supported on device."; \
- EXPECT_FALSE(a); \
- } else { \
- EXPECT_TRUE(a); \
- } \
+#define EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(a) \
+ do { \
+ if (!MediaDrmBridge::IsKeySystemSupported(kWidevineKeySystem)) { \
+ VLOG(0) << "Widevine not supported on device."; \
+ EXPECT_FALSE(a); \
+ } else { \
+ EXPECT_TRUE(a); \
+ } \
} while (0)
const char kAudioMp4[] = "audio/mp4";
@@ -34,10 +34,6 @@ const MediaDrmBridge::SecurityLevel kL3 = MediaDrmBridge::SECURITY_LEVEL_3;
// Helper functions to avoid typing "MediaDrmBridge::" in tests.
-static bool IsKeySystemSupported(const std::string& key_system) {
- return MediaDrmBridge::IsKeySystemSupported(key_system);
-}
-
static bool IsKeySystemSupportedWithType(
const std::string& key_system,
const std::string& container_mime_type) {
@@ -45,42 +41,21 @@ static bool IsKeySystemSupportedWithType(
container_mime_type);
}
-static bool IsSecurityLevelSupported(
- const std::string& key_system,
- MediaDrmBridge::SecurityLevel security_level) {
- return MediaDrmBridge::IsSecurityLevelSupported(key_system, security_level);
-}
-
-TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_Widevine) {
- EXPECT_FALSE(IsSecurityLevelSupported(kWidevineKeySystem, kLNone));
- // We test "L3" fully. But for "L1" we don't check the result as it depends on
- // whether the test device supports "L1".
- EXPECT_TRUE_IF_AVAILABLE(IsSecurityLevelSupported(kWidevineKeySystem, kL3));
- IsSecurityLevelSupported(kWidevineKeySystem, kL1);
-}
-
-// Invalid keysytem is NOT supported regardless whether MediaDrm is available.
-TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_InvalidKeySystem) {
- EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kLNone));
- EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL1));
- EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL3));
-}
-
TEST(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
- EXPECT_TRUE_IF_AVAILABLE(IsKeySystemSupported(kWidevineKeySystem));
-
// TODO(xhwang): Enable when b/13564917 is fixed.
// EXPECT_TRUE_IF_AVAILABLE(
// IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioMp4));
- EXPECT_TRUE_IF_AVAILABLE(
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(
IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoMp4));
if (base::android::BuildInfo::GetInstance()->sdk_int() <= 19) {
EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioWebM));
EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoWebM));
} else {
- EXPECT_TRUE(IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioWebM));
- EXPECT_TRUE(IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoWebM));
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioWebM));
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoWebM));
}
EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, "unknown"));
@@ -88,9 +63,9 @@ TEST(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, "audio/mp3"));
}
-// Invalid keysytem is NOT supported regardless whether MediaDrm is available.
+// Invalid key system is NOT supported regardless of whether MediaDrm is available.
TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
- EXPECT_FALSE(IsKeySystemSupported(kInvalidKeySystem));
+ EXPECT_FALSE(MediaDrmBridge::IsKeySystemSupported(kInvalidKeySystem));
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kAudioMp4));
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoMp4));
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kAudioWebM));
@@ -100,4 +75,28 @@ TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, "audio/mp3"));
}
+TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_Widevine) {
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(
+ MediaDrmBridge::CreateWithoutSessionSupport(kWidevineKeySystem));
+}
+
+// Invalid key system is NOT supported regardless of whether MediaDrm is available.
+TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_InvalidKeySystem) {
+ EXPECT_FALSE(MediaDrmBridge::CreateWithoutSessionSupport(kInvalidKeySystem));
+}
+
+TEST(MediaDrmBridgeTest, SetSecurityLevel_Widevine) {
+ scoped_ptr<MediaDrmBridge> media_drm_bridge =
+ MediaDrmBridge::CreateWithoutSessionSupport(kWidevineKeySystem);
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(media_drm_bridge);
+ if (!media_drm_bridge)
+ return;
+
+ EXPECT_FALSE(media_drm_bridge->SetSecurityLevel(kLNone));
+ // We test "L3" fully. But for "L1" we don't check the result as it depends on
+ // whether the test device supports "L1".
+ EXPECT_TRUE(media_drm_bridge->SetSecurityLevel(kL3));
+ media_drm_bridge->SetSecurityLevel(kL1);
+}
+
} // namespace media
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
index 954ba96c70d..60e1dfc0f82 100644
--- a/chromium/media/base/android/media_player_android.cc
+++ b/chromium/media/base/android/media_player_android.cc
@@ -6,7 +6,8 @@
#include "base/android/jni_android.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
@@ -21,13 +22,19 @@ MediaPlayerAndroid::MediaPlayerAndroid(
player_id_(player_id),
manager_(manager),
frame_url_(frame_url),
+ is_audible_(false),
weak_factory_(this) {
- listener_.reset(new MediaPlayerListener(base::MessageLoopProxy::current(),
+ listener_.reset(new MediaPlayerListener(base::ThreadTaskRunnerHandle::Get(),
weak_factory_.GetWeakPtr()));
}
MediaPlayerAndroid::~MediaPlayerAndroid() {}
+// For most subclasses we can delete on the caller thread.
+void MediaPlayerAndroid::DeleteOnCorrectThread() {
+ delete this;
+}
+
GURL MediaPlayerAndroid::GetUrl() {
return GURL();
}
@@ -38,7 +45,7 @@ GURL MediaPlayerAndroid::GetFirstPartyForCookies() {
void MediaPlayerAndroid::SetCdm(BrowserCdm* /* cdm */) {
// Players that support EME should override this.
- NOTREACHED() << "EME not supported on base MediaPlayerAndroid class.";
+ LOG(ERROR) << "EME not supported on base MediaPlayerAndroid class.";
return;
}
@@ -79,5 +86,16 @@ void MediaPlayerAndroid::DetachListener() {
listener_->ReleaseMediaPlayerListenerResources();
}
+void MediaPlayerAndroid::DestroyListenerOnUIThread() {
+ weak_factory_.InvalidateWeakPtrs();
+ listener_.reset();
+}
+
+void MediaPlayerAndroid::SetAudible(bool is_audible) {
+ if (is_audible_ != is_audible) {
+ is_audible_ = is_audible;
+ manager_->OnAudibleStateChanged(player_id(), is_audible_);
+ }
+}
} // namespace media
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
index cd5a92f18e1..8928222156b 100644
--- a/chromium/media/base/android/media_player_android.h
+++ b/chromium/media/base/android/media_player_android.h
@@ -39,6 +39,10 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Callback when the player needs decoding resources.
typedef base::Callback<void(int player_id)> RequestMediaResourcesCB;
+ // Virtual destructor.
+ // For most subclasses we can delete on the caller thread.
+ virtual void DeleteOnCorrectThread();
+
// Passing an external java surface object to the player.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
@@ -103,6 +107,12 @@ class MEDIA_EXPORT MediaPlayerAndroid {
void AttachListener(jobject j_media_player);
void DetachListener();
+ // When destroying a subclassed object on a non-UI thread,
+ // the |listener_|-related state must still be destroyed
+ // on the UI thread.
+ void DestroyListenerOnUIThread();
+ void SetAudible(bool is_audible);
+
MediaPlayerManager* manager() { return manager_; }
RequestMediaResourcesCB request_media_resources_cb_;
@@ -122,6 +132,9 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Listener object that listens to all the media player events.
scoped_ptr<MediaPlayerListener> listener_;
+ // Maintains the audible state of the player, true if it is playing sound.
+ bool is_audible_;
+
// Weak pointer passed to |listener_| for callbacks.
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaPlayerAndroid> weak_factory_;
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index 37ea8c128a7..ed8854f1b68 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -105,21 +105,6 @@ void MediaPlayerBridge::CreateJavaMediaPlayerBridge() {
AttachListener(j_media_player_bridge_.obj());
}
-void MediaPlayerBridge::SetJavaMediaPlayerBridge(
- jobject j_media_player_bridge) {
- JNIEnv* env = base::android::AttachCurrentThread();
- CHECK(env);
-
- j_media_player_bridge_.Reset(env, j_media_player_bridge);
-}
-
-base::android::ScopedJavaLocalRef<jobject> MediaPlayerBridge::
- GetJavaMediaPlayerBridge() {
- base::android::ScopedJavaLocalRef<jobject> j_bridge(
- j_media_player_bridge_);
- return j_bridge;
-}
-
void MediaPlayerBridge::SetDuration(base::TimeDelta duration) {
duration_ = duration;
}
@@ -370,6 +355,8 @@ void MediaPlayerBridge::Release() {
if (j_media_player_bridge_.is_null())
return;
+ SetAudible(false);
+
time_update_timer_.Stop();
if (prepared_) {
pending_seek_ = GetCurrentTime();
@@ -386,15 +373,22 @@ void MediaPlayerBridge::Release() {
}
void MediaPlayerBridge::SetVolume(double volume) {
- if (j_media_player_bridge_.is_null()) {
- volume_ = volume;
+ volume_ = volume;
+
+ if (j_media_player_bridge_.is_null())
return;
- }
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
+
+ // Update the audible state if we are playing.
+ jboolean is_playing = Java_MediaPlayerBridge_isPlaying(
+ env, j_media_player_bridge_.obj());
+ if (is_playing)
+ SetAudible(volume_ > 0);
+
Java_MediaPlayerBridge_setVolume(
- env, j_media_player_bridge_.obj(), volume);
+ env, j_media_player_bridge_.obj(), volume_);
}
void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
@@ -404,11 +398,13 @@ void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
}
void MediaPlayerBridge::OnPlaybackComplete() {
+ SetAudible(false);
time_update_timer_.Stop();
MediaPlayerAndroid::OnPlaybackComplete();
}
void MediaPlayerBridge::OnMediaInterrupted() {
+ SetAudible(false);
time_update_timer_.Stop();
MediaPlayerAndroid::OnMediaInterrupted();
}
@@ -468,9 +464,13 @@ void MediaPlayerBridge::StartInternal() {
base::TimeDelta::FromMilliseconds(kTimeUpdateInterval),
this, &MediaPlayerBridge::OnTimeUpdateTimerFired);
}
+
+ SetAudible(volume_ > 0);
}
void MediaPlayerBridge::PauseInternal() {
+ SetAudible(false);
+
JNIEnv* env = base::android::AttachCurrentThread();
Java_MediaPlayerBridge_pause(env, j_media_player_bridge_.obj());
time_update_timer_.Stop();
@@ -504,9 +504,7 @@ void MediaPlayerBridge::OnTimeUpdateTimerFired() {
}
bool MediaPlayerBridge::RegisterMediaPlayerBridge(JNIEnv* env) {
- bool ret = RegisterNativesImpl(env);
- DCHECK(g_MediaPlayerBridge_clazz);
- return ret;
+ return RegisterNativesImpl(env);
}
bool MediaPlayerBridge::CanPause() {
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index ad5dee748cf..291aa97959d 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -50,35 +50,33 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
const RequestMediaResourcesCB& request_media_resources_cb,
const GURL& frame_url,
bool allow_credentials);
- virtual ~MediaPlayerBridge();
+ ~MediaPlayerBridge() override;
// Initialize this object and extract the metadata from the media.
virtual void Initialize();
// MediaPlayerAndroid implementation.
- virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) override;
- virtual void Start() override;
- virtual void Pause(bool is_media_related_action) override;
- virtual void SeekTo(base::TimeDelta timestamp) override;
- virtual void Release() override;
- virtual void SetVolume(double volume) override;
- virtual int GetVideoWidth() override;
- virtual int GetVideoHeight() override;
- virtual base::TimeDelta GetCurrentTime() override;
- virtual base::TimeDelta GetDuration() override;
- virtual bool IsPlaying() override;
- virtual bool CanPause() override;
- virtual bool CanSeekForward() override;
- virtual bool CanSeekBackward() override;
- virtual bool IsPlayerReady() override;
- virtual GURL GetUrl() override;
- virtual GURL GetFirstPartyForCookies() override;
+ void SetVideoSurface(gfx::ScopedJavaSurface surface) override;
+ void Start() override;
+ void Pause(bool is_media_related_action) override;
+ void SeekTo(base::TimeDelta timestamp) override;
+ void Release() override;
+ void SetVolume(double volume) override;
+ int GetVideoWidth() override;
+ int GetVideoHeight() override;
+ base::TimeDelta GetCurrentTime() override;
+ base::TimeDelta GetDuration() override;
+ bool IsPlaying() override;
+ bool CanPause() override;
+ bool CanSeekForward() override;
+ bool CanSeekBackward() override;
+ bool IsPlayerReady() override;
+ GURL GetUrl() override;
+ GURL GetFirstPartyForCookies() override;
void OnDidSetDataUriDataSource(JNIEnv* env, jobject obj, jboolean success);
protected:
- void SetJavaMediaPlayerBridge(jobject j_media_player_bridge);
- base::android::ScopedJavaLocalRef<jobject> GetJavaMediaPlayerBridge();
void SetDuration(base::TimeDelta time);
virtual void PendingSeekInternal(const base::TimeDelta& time);
@@ -89,10 +87,10 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual void Prepare();
// MediaPlayerAndroid implementation.
- virtual void OnVideoSizeChanged(int width, int height) override;
- virtual void OnPlaybackComplete() override;
- virtual void OnMediaInterrupted() override;
- virtual void OnMediaPrepared() override;
+ void OnVideoSizeChanged(int width, int height) override;
+ void OnPlaybackComplete() override;
+ void OnMediaInterrupted() override;
+ void OnMediaPrepared() override;
// Create the corresponding Java class instance.
virtual void CreateJavaMediaPlayerBridge();
diff --git a/chromium/media/base/android/media_player_listener.cc b/chromium/media/base/android/media_player_listener.cc
index b4261a84f9e..861a34f7b0b 100644
--- a/chromium/media/base/android/media_player_listener.cc
+++ b/chromium/media/base/android/media_player_listener.cc
@@ -94,9 +94,7 @@ void MediaPlayerListener::OnMediaInterrupted(
}
bool MediaPlayerListener::RegisterMediaPlayerListener(JNIEnv* env) {
- bool ret = RegisterNativesImpl(env);
- DCHECK(g_MediaPlayerListener_clazz);
- return ret;
+ return RegisterNativesImpl(env);
}
} // namespace media
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
index 58a712f383b..feac84e9193 100644
--- a/chromium/media/base/android/media_player_manager.h
+++ b/chromium/media/base/android/media_player_manager.h
@@ -63,6 +63,13 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called when video size has changed. Args: player ID, width, height.
virtual void OnVideoSizeChanged(int player_id, int width, int height) = 0;
+ // Called when the player thinks it stopped or started making sound.
+ virtual void OnAudibleStateChanged(int player_id, bool is_audible_now) = 0;
+
+ // Called when the player pauses as a new key is required to decrypt
+ // encrypted content.
+ virtual void OnWaitingForDecryptionKey(int player_id) = 0;
+
// Returns the player that's in the fullscreen mode currently.
virtual MediaPlayerAndroid* GetFullscreenPlayer() = 0;
@@ -71,12 +78,6 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called by the player to get a hardware protected surface.
virtual void RequestFullScreen(int player_id) = 0;
-
-#if defined(VIDEO_HOLE)
- // Returns true if a media player should use video-overlay for the embedded
- // encrypted video.
- virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() = 0;
-#endif // defined(VIDEO_HOLE)
};
} // namespace media
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
index 2be0d8b2347..7e8f49a4613 100644
--- a/chromium/media/base/android/media_source_player.cc
+++ b/chromium/media/base/android/media_source_player.cc
@@ -12,11 +12,10 @@
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/android/audio_decoder_job.h"
-#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/video_decoder_job.h"
@@ -121,15 +120,6 @@ void MediaSourcePlayer::Start() {
playing_ = true;
- bool request_fullscreen = IsProtectedSurfaceRequired();
-#if defined(VIDEO_HOLE)
- // Skip to request fullscreen when hole-punching is used.
- request_fullscreen = request_fullscreen &&
- !manager()->ShouldUseVideoOverlayForEmbeddedEncryptedVideo();
-#endif // defined(VIDEO_HOLE)
- if (request_fullscreen)
- manager()->RequestFullScreen(player_id());
-
StartInternal();
}
@@ -143,6 +133,8 @@ void MediaSourcePlayer::Pause(bool is_media_related_action) {
// MediaDecoderCallback() is called.
playing_ = false;
start_time_ticks_ = base::TimeTicks();
+
+ SetAudible(false);
}
bool MediaSourcePlayer::IsPlaying() {
@@ -194,6 +186,8 @@ void MediaSourcePlayer::Release() {
playing_ = false;
decoder_starvation_callback_.Cancel();
+
+ SetAudible(false);
DetachListener();
}
@@ -238,6 +232,7 @@ void MediaSourcePlayer::OnDemuxerConfigsAvailable(
const DemuxerConfigs& configs) {
DVLOG(1) << __FUNCTION__;
DCHECK(!HasAudio() && !HasVideo());
+
duration_ = configs.duration;
audio_decoder_job_->SetDemuxerConfigs(configs);
@@ -399,6 +394,7 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling PREFETCH_REQUEST_EVENT.";
+
int count = (AudioFinished() ? 0 : 1) + (VideoFinished() ? 0 : 1);
// It is possible that all streams have finished decode, yet starvation
@@ -493,12 +489,18 @@ void MediaSourcePlayer::MediaDecoderCallback(
return;
}
- if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM) {
+ if (is_audio)
+ SetAudible(false);
return;
+ }
if (!playing_) {
if (is_clock_manager)
interpolator_.StopInterpolating();
+
+ if (is_audio)
+ SetAudible(false);
return;
}
@@ -507,7 +509,11 @@ void MediaSourcePlayer::MediaDecoderCallback(
DVLOG(2) << __FUNCTION__ << ": Key was added during decoding.";
ResumePlaybackAfterKeyAdded();
} else {
+ if (is_audio)
+ SetAudible(false);
+
is_waiting_for_key_ = true;
+ manager()->OnWaitingForDecryptionKey(player_id());
}
return;
}
@@ -522,11 +528,14 @@ void MediaSourcePlayer::MediaDecoderCallback(
// and video in one file. If we separate them, we should be able to remove a
// lot of duplication.
- // If the status is MEDIA_CODEC_STOPPED, stop decoding new data. The player is
+ // If the status is MEDIA_CODEC_ABORT, stop decoding new data. The player is
// in the middle of a seek or stop event and needs to wait for the IPCs to
// come.
- if (status == MEDIA_CODEC_STOPPED)
+ if (status == MEDIA_CODEC_ABORT) {
+ if (is_audio)
+ SetAudible(false);
return;
+ }
if (prerolling_ && IsPrerollFinished(is_audio)) {
if (IsPrerollFinished(!is_audio)) {
@@ -536,6 +545,13 @@ void MediaSourcePlayer::MediaDecoderCallback(
return;
}
+ // We successfully decoded a frame and are going to the next one.
+ // Set the audible state.
+ if (is_audio) {
+ bool is_audible = !prerolling_ && audio_decoder_job_->volume() > 0;
+ SetAudible(is_audible);
+ }
+
if (is_clock_manager) {
// If we have a valid timestamp, start the starvation callback. Otherwise,
// reset the |start_time_ticks_| so that the next frame will not suffer
@@ -564,18 +580,25 @@ void MediaSourcePlayer::DecodeMoreAudio() {
DCHECK(!audio_decoder_job_->is_decoding());
DCHECK(!AudioFinished());
- if (audio_decoder_job_->Decode(
+ MediaDecoderJob::MediaDecoderJobStatus status = audio_decoder_job_->Decode(
start_time_ticks_,
start_presentation_timestamp_,
- base::Bind(&MediaSourcePlayer::MediaDecoderCallback, weak_this_, true))) {
- TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreAudio",
- audio_decoder_job_.get());
- return;
- }
+ base::Bind(&MediaSourcePlayer::MediaDecoderCallback, weak_this_, true));
- is_waiting_for_audio_decoder_ = true;
- if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
- SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
+ switch (status) {
+ case MediaDecoderJob::STATUS_SUCCESS:
+ TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreAudio",
+ audio_decoder_job_.get());
+ break;
+ case MediaDecoderJob::STATUS_KEY_FRAME_REQUIRED:
+ NOTREACHED();
+ break;
+ case MediaDecoderJob::STATUS_FAILURE:
+ is_waiting_for_audio_decoder_ = true;
+ if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
+ SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
+ break;
+ }
}
void MediaSourcePlayer::DecodeMoreVideo() {
@@ -583,25 +606,26 @@ void MediaSourcePlayer::DecodeMoreVideo() {
DCHECK(!video_decoder_job_->is_decoding());
DCHECK(!VideoFinished());
- if (video_decoder_job_->Decode(
+ MediaDecoderJob::MediaDecoderJobStatus status = video_decoder_job_->Decode(
start_time_ticks_,
start_presentation_timestamp_,
base::Bind(&MediaSourcePlayer::MediaDecoderCallback, weak_this_,
- false))) {
- TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreVideo",
- video_decoder_job_.get());
- return;
- }
+ false));
- // If the decoder is waiting for iframe, trigger a browser seek.
- if (!video_decoder_job_->next_video_data_is_iframe()) {
- BrowserSeekToCurrentTime();
- return;
+ switch (status) {
+ case MediaDecoderJob::STATUS_SUCCESS:
+ TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreVideo",
+ video_decoder_job_.get());
+ break;
+ case MediaDecoderJob::STATUS_KEY_FRAME_REQUIRED:
+ BrowserSeekToCurrentTime();
+ break;
+ case MediaDecoderJob::STATUS_FAILURE:
+ is_waiting_for_video_decoder_ = true;
+ if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
+ SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
+ break;
}
-
- is_waiting_for_video_decoder_ = true;
- if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
- SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
}
void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
@@ -639,6 +663,14 @@ bool MediaSourcePlayer::VideoFinished() {
void MediaSourcePlayer::OnDecoderStarved() {
DVLOG(1) << __FUNCTION__;
+
+ if (HasAudio()) {
+ // If the starvation timer fired but there are no encoded frames
+ // in the queue, we assume the demuxer (i.e. the renderer process) froze.
+ if (!audio_decoder_job_->HasData())
+ SetAudible(false);
+ }
+
SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -675,11 +707,6 @@ void MediaSourcePlayer::StartStarvationCallback(
FROM_HERE, decoder_starvation_callback_.callback(), timeout);
}
-bool MediaSourcePlayer::IsProtectedSurfaceRequired() {
- return video_decoder_job_->is_content_encrypted() &&
- drm_bridge_ && drm_bridge_->IsProtectedSurfaceRequired();
-}
-
void MediaSourcePlayer::OnPrefetchDone() {
DVLOG(1) << __FUNCTION__;
DCHECK(!audio_decoder_job_->is_decoding());
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
index e0922b6aed5..ab3bda3e72c 100644
--- a/chromium/media/base/android/media_source_player.h
+++ b/chromium/media/base/android/media_source_player.h
@@ -43,32 +43,31 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
const RequestMediaResourcesCB& request_media_resources_cb,
scoped_ptr<DemuxerAndroid> demuxer,
const GURL& frame_url);
- virtual ~MediaSourcePlayer();
+ ~MediaSourcePlayer() override;
// MediaPlayerAndroid implementation.
- virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) override;
- virtual void Start() override;
- virtual void Pause(bool is_media_related_action) override;
- virtual void SeekTo(base::TimeDelta timestamp) override;
- virtual void Release() override;
- virtual void SetVolume(double volume) override;
- virtual int GetVideoWidth() override;
- virtual int GetVideoHeight() override;
- virtual base::TimeDelta GetCurrentTime() override;
- virtual base::TimeDelta GetDuration() override;
- virtual bool IsPlaying() override;
- virtual bool CanPause() override;
- virtual bool CanSeekForward() override;
- virtual bool CanSeekBackward() override;
- virtual bool IsPlayerReady() override;
- virtual void SetCdm(BrowserCdm* cdm) override;
+ void SetVideoSurface(gfx::ScopedJavaSurface surface) override;
+ void Start() override;
+ void Pause(bool is_media_related_action) override;
+ void SeekTo(base::TimeDelta timestamp) override;
+ void Release() override;
+ void SetVolume(double volume) override;
+ int GetVideoWidth() override;
+ int GetVideoHeight() override;
+ base::TimeDelta GetCurrentTime() override;
+ base::TimeDelta GetDuration() override;
+ bool IsPlaying() override;
+ bool CanPause() override;
+ bool CanSeekForward() override;
+ bool CanSeekBackward() override;
+ bool IsPlayerReady() override;
+ void SetCdm(BrowserCdm* cdm) override;
// DemuxerAndroidClient implementation.
- virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) override;
- virtual void OnDemuxerDataAvailable(const DemuxerData& params) override;
- virtual void OnDemuxerSeekDone(
- base::TimeDelta actual_browser_seek_time) override;
- virtual void OnDemuxerDurationChanged(base::TimeDelta duration) override;
+ void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) override;
+ void OnDemuxerDataAvailable(const DemuxerData& params) override;
+ void OnDemuxerSeekDone(base::TimeDelta actual_browser_seek_time) override;
+ void OnDemuxerDurationChanged(base::TimeDelta duration) override;
private:
friend class MediaSourcePlayerTest;
@@ -146,10 +145,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// since last keyframe. See http://crbug.com/304234.
void BrowserSeekToCurrentTime();
- // Helper function to determine whether a protected surface is needed for
- // video playback.
- bool IsProtectedSurfaceRequired();
-
// Called when a MediaDecoderJob finishes prefetching data. Once all
// MediaDecoderJobs have prefetched data, then this method updates
// |start_time_ticks_| and |start_presentation_timestamp_| so that video can
@@ -273,9 +268,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
bool prerolling_;
// Weak pointer passed to media decoder jobs for callbacks.
+ base::WeakPtr<MediaSourcePlayer> weak_this_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaSourcePlayer> weak_factory_;
- base::WeakPtr<MediaSourcePlayer> weak_this_;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
index 376e0b1fdfd..f37cf205a54 100644
--- a/chromium/media/base/android/media_source_player_unittest.cc
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -46,46 +46,45 @@ class MockMediaPlayerManager : public MediaPlayerManager {
playback_completed_(false),
num_resources_requested_(0),
num_metadata_changes_(0),
- timestamp_updated_(false) {}
- virtual ~MockMediaPlayerManager() {}
+ timestamp_updated_(false),
+ is_audible_(false),
+ is_delay_expired_(false) {}
+ ~MockMediaPlayerManager() override {}
// MediaPlayerManager implementation.
- virtual MediaResourceGetter* GetMediaResourceGetter() override {
- return NULL;
- }
- virtual MediaUrlInterceptor* GetMediaUrlInterceptor() override {
- return NULL;
- }
- virtual void OnTimeUpdate(int player_id,
- base::TimeDelta current_time,
- base::TimeTicks current_time_ticks) override {
+ MediaResourceGetter* GetMediaResourceGetter() override { return NULL; }
+ MediaUrlInterceptor* GetMediaUrlInterceptor() override { return NULL; }
+ void OnTimeUpdate(int player_id,
+ base::TimeDelta current_time,
+ base::TimeTicks current_time_ticks) override {
timestamp_updated_ = true;
}
- virtual void OnMediaMetadataChanged(
- int player_id, base::TimeDelta duration, int width, int height,
- bool success) override {
+ void OnMediaMetadataChanged(int player_id,
+ base::TimeDelta duration,
+ int width,
+ int height,
+ bool success) override {
num_metadata_changes_++;
}
- virtual void OnPlaybackComplete(int player_id) override {
+ void OnPlaybackComplete(int player_id) override {
playback_completed_ = true;
if (message_loop_->is_running())
message_loop_->Quit();
}
- virtual void OnMediaInterrupted(int player_id) override {}
- virtual void OnBufferingUpdate(int player_id, int percentage) override {}
- virtual void OnSeekComplete(int player_id,
- const base::TimeDelta& current_time) override {}
- virtual void OnError(int player_id, int error) override {}
- virtual void OnVideoSizeChanged(int player_id, int width,
- int height) override {}
- virtual MediaPlayerAndroid* GetFullscreenPlayer() override { return NULL; }
- virtual MediaPlayerAndroid* GetPlayer(int player_id) override { return NULL; }
- virtual void RequestFullScreen(int player_id) override {}
-#if defined(VIDEO_HOLE)
- virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() override {
- return false;
+ void OnMediaInterrupted(int player_id) override {}
+ void OnBufferingUpdate(int player_id, int percentage) override {}
+ void OnSeekComplete(int player_id,
+ const base::TimeDelta& current_time) override {}
+ void OnError(int player_id, int error) override {}
+ void OnVideoSizeChanged(int player_id, int width, int height) override {}
+ void OnWaitingForDecryptionKey(int player_id) override {}
+ MediaPlayerAndroid* GetFullscreenPlayer() override { return NULL; }
+ MediaPlayerAndroid* GetPlayer(int player_id) override { return NULL; }
+ void RequestFullScreen(int player_id) override {}
+
+ void OnAudibleStateChanged(int player_id, bool is_audible_now) override {
+ is_audible_ = is_audible_now;
}
-#endif // defined(VIDEO_HOLE)
bool playback_completed() const {
return playback_completed_;
@@ -111,6 +110,18 @@ class MockMediaPlayerManager : public MediaPlayerManager {
timestamp_updated_ = false;
}
+ bool is_audible() const {
+ return is_audible_;
+ }
+
+ bool is_delay_expired() const {
+ return is_delay_expired_;
+ }
+
+ void SetDelayExpired(bool value) {
+ is_delay_expired_ = value;
+ }
+
private:
base::MessageLoop* message_loop_;
bool playback_completed_;
@@ -120,6 +131,10 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int num_metadata_changes_;
// Playback timestamp was updated.
bool timestamp_updated_;
+ // Audible state of the pipeline
+ bool is_audible_;
+ // Helper flag to ensure delay for WaitForDelay().
+ bool is_delay_expired_;
DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
};
@@ -131,16 +146,16 @@ class MockDemuxerAndroid : public DemuxerAndroid {
num_data_requests_(0),
num_seek_requests_(0),
num_browser_seek_requests_(0) {}
- virtual ~MockDemuxerAndroid() {}
+ ~MockDemuxerAndroid() override {}
- virtual void Initialize(DemuxerAndroidClient* client) override {}
- virtual void RequestDemuxerData(DemuxerStream::Type type) override {
+ void Initialize(DemuxerAndroidClient* client) override {}
+ void RequestDemuxerData(DemuxerStream::Type type) override {
num_data_requests_++;
if (message_loop_->is_running())
message_loop_->Quit();
}
- virtual void RequestDemuxerSeek(const base::TimeDelta& time_to_seek,
- bool is_browser_seek) override {
+ void RequestDemuxerSeek(const base::TimeDelta& time_to_seek,
+ bool is_browser_seek) override {
num_seek_requests_++;
if (is_browser_seek)
num_browser_seek_requests_++;
@@ -177,7 +192,8 @@ class MediaSourcePlayerTest : public testing::Test {
GURL()),
decoder_callback_hook_executed_(false),
surface_texture_a_is_next_(true) {}
- virtual ~MediaSourcePlayerTest() {}
+
+ ~MediaSourcePlayerTest() override {}
protected:
// Get the decoder job from the MediaSourcePlayer. The return value must not
@@ -423,7 +439,7 @@ class MediaSourcePlayerTest : public testing::Test {
data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
data.access_units.resize(1);
data.access_units[0].status = DemuxerStream::kOk;
- data.access_units[0].end_of_stream = true;
+ data.access_units[0].is_end_of_stream = true;
return data;
}
@@ -435,6 +451,10 @@ class MediaSourcePlayerTest : public testing::Test {
return data;
}
+ bool HasData(bool is_audio) {
+ return GetMediaDecoderJob(is_audio)->HasData();
+ }
+
// Helper method for use at test start. It starts an audio decoder job and
// immediately feeds it some data to decode. Then, without letting the decoder
// job complete a decode cycle, it also starts player SeekTo(). Upon return,
@@ -536,6 +556,37 @@ class MediaSourcePlayerTest : public testing::Test {
EXPECT_LE(target_timestamp, player_.GetCurrentTime());
}
+ void PlayAudioForTimeInterval(const base::TimeDelta& start_timestamp,
+ const base::TimeDelta& target_timestamp ) {
+
+ DemuxerData data = CreateReadFromDemuxerAckForAudio(1);
+ int current_timestamp = start_timestamp.InMilliseconds();
+ int stop_timestamp = target_timestamp.InMilliseconds();
+ while (current_timestamp < stop_timestamp) {
+ data.access_units[0].timestamp =
+ base::TimeDelta::FromMilliseconds(current_timestamp);
+ player_.OnDemuxerDataAvailable(data);
+ current_timestamp += 30;
+ WaitForAudioDecodeDone();
+ }
+ }
+
+ void WaitForDelay(const base::TimeDelta& delay) {
+ // Let the message_loop_ process events.
+ // We post a delayed task and RunUntilIdle() until it signals.
+
+ manager_.SetDelayExpired(false);
+ message_loop_.PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&MockMediaPlayerManager::SetDelayExpired,
+ base::Unretained(&manager_),
+ true),
+ delay);
+
+ while (!manager_.is_delay_expired())
+ message_loop_.RunUntilIdle();
+ }
+
DemuxerData CreateReadFromDemuxerAckWithConfigChanged(
bool is_audio,
int config_unit_index,
@@ -839,6 +890,7 @@ class MediaSourcePlayerTest : public testing::Test {
return GetMediaDecoderJob(is_audio)->drain_decoder_;
}
+ protected:
base::MessageLoop message_loop_;
MockMediaPlayerManager manager_;
MockDemuxerAndroid* demuxer_; // Owned by |player_|.
@@ -859,6 +911,8 @@ class MediaSourcePlayerTest : public testing::Test {
bool surface_texture_a_is_next_;
int next_texture_id_;
+ bool verify_not_audible_is_called_;
+
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
};
@@ -890,6 +944,107 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
EXPECT_FALSE(GetMediaCodecBridge(true));
}
+// timav
+TEST_F(MediaSourcePlayerTest, AudioDecoderSetsAudibleState) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // No data arrived yet
+ EXPECT_FALSE(manager_.is_audible());
+
+ // Initialize decoder
+ StartAudioDecoderJob();
+ player_.SetVolume(1.0);
+
+ // Process frames until prerolling is done.
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
+ EXPECT_TRUE(IsPrerolling(false));
+
+ // Send more packets
+ PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
+ base::TimeDelta::FromMilliseconds(220));
+
+ // The player should trigger audible status
+ EXPECT_TRUE(manager_.is_audible());
+
+ // The player release should report a non-audible state.
+ ReleasePlayer();
+ EXPECT_FALSE(manager_.is_audible());
+}
+
+TEST_F(MediaSourcePlayerTest, AudioDecoderRemovesAudibleStateWhenPaused) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // No data arrived yet
+ EXPECT_FALSE(manager_.is_audible());
+
+ // Initialize decoder
+ StartAudioDecoderJob();
+ player_.SetVolume(1.0);
+
+ // Process frames until prerolling is done.
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
+ EXPECT_TRUE(IsPrerolling(false));
+
+ // Send more packets
+ PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
+ base::TimeDelta::FromMilliseconds(220));
+
+ // The player should trigger audible status
+ EXPECT_TRUE(manager_.is_audible());
+
+ // Pause the player
+ player_.Pause(true);
+
+ // Send more packets
+ PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(240),
+ base::TimeDelta::FromMilliseconds(280));
+
+ // The player should trigger audible status again
+ EXPECT_FALSE(manager_.is_audible());
+
+ player_.Release();
+}
+
+TEST_F(MediaSourcePlayerTest, AudioDecoderRemovesAudibleStateWhenIdle) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // No data arrived yet
+ EXPECT_FALSE(manager_.is_audible());
+
+ // Initialize decoder
+ StartAudioDecoderJob();
+ player_.SetVolume(1.0);
+
+ // Process frames until prerolling is done.
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
+ EXPECT_TRUE(IsPrerolling(false));
+
+ // Send more packets
+ PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
+ base::TimeDelta::FromMilliseconds(220));
+
+ // The player should trigger audible status
+ EXPECT_TRUE(manager_.is_audible());
+
+ // Simulate the freeze on demuxer: wait for 300 ms
+ WaitForDelay(base::TimeDelta::FromMilliseconds(300));
+
+ // By this time the player should have reported
+ // that there is no audio.
+ EXPECT_FALSE(manager_.is_audible());
+
+ ReleasePlayer();
+}
+
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
@@ -1043,7 +1198,9 @@ TEST_F(MediaSourcePlayerTest, SetEmptySurfaceAndStarveWhileDecoding) {
// Playback resumes once a non-empty surface is passed.
CreateNextTextureAndSetVideoSurface();
- EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+ while(demuxer_->num_browser_seek_requests() != 1)
+ message_loop_.RunUntilIdle();
WaitForVideoDecodeDone();
}
@@ -1534,6 +1691,34 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_MidStreamReleaseAndStart) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
+TEST_F(MediaSourcePlayerTest, NoBrowserSeekWithKeyFrameInCache) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that browser seek is not needed if a key frame is found in data
+ // cache.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob();
+ DemuxerData data = CreateReadFromDemuxerAckForVideo(false);
+ data.access_units[0].is_key_frame = true;
+
+ // Simulate demuxer's response to the video data request.
+ player_.OnDemuxerDataAvailable(data);
+
+ // Trigger decoder recreation later by changing surfaces.
+ CreateNextTextureAndSetVideoSurface();
+
+ // Wait for the media codec bridge to finish decoding and be reset.
+ WaitForVideoDecodeDone();
+ EXPECT_FALSE(HasData(false));
+
+ // Send a non key frame to decoder so that decoder can continue. This will
+ // not trigger any browser seeks as the previous key frame is still in the
+ // buffer.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo(false));
+ WaitForVideoDecodeDone();
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+}
+
TEST_F(MediaSourcePlayerTest, PrerollAudioAfterSeek) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
diff --git a/chromium/media/base/android/video_decoder_job.cc b/chromium/media/base/android/video_decoder_job.cc
index bfa0dc32a66..d34ea28aada 100644
--- a/chromium/media/base/android/video_decoder_job.cc
+++ b/chromium/media/base/android/video_decoder_job.cc
@@ -37,8 +37,7 @@ VideoDecoderJob::VideoDecoderJob(
config_height_(0),
output_width_(0),
output_height_(0),
- request_resources_cb_(request_resources_cb),
- next_video_data_is_iframe_(true) {
+ request_resources_cb_(request_resources_cb) {
}
VideoDecoderJob::~VideoDecoderJob() {}
@@ -61,11 +60,6 @@ bool VideoDecoderJob::HasStream() const {
return video_codec_ != kUnknownVideoCodec;
}
-void VideoDecoderJob::Flush() {
- MediaDecoderJob::Flush();
- next_video_data_is_iframe_ = true;
-}
-
void VideoDecoderJob::ReleaseDecoderResources() {
MediaDecoderJob::ReleaseDecoderResources();
surface_ = gfx::ScopedJavaSurface();
@@ -124,16 +118,17 @@ bool VideoDecoderJob::AreDemuxerConfigsChanged(
config_height_ != configs.video_size.height();
}
-bool VideoDecoderJob::CreateMediaCodecBridgeInternal() {
+MediaDecoderJob::MediaDecoderJobStatus
+ VideoDecoderJob::CreateMediaCodecBridgeInternal() {
if (surface_.IsEmpty()) {
ReleaseMediaCodecBridge();
- return false;
+ return STATUS_FAILURE;
}
- // If the next data is not iframe, return false so that the player need to
- // perform a browser seek.
- if (!next_video_data_is_iframe_)
- return false;
+ // If we cannot find a key frame in cache, browser seek is needed.
+ bool next_video_data_is_iframe = SetCurrentFrameToPreviouslyCachedKeyFrame();
+ if (!next_video_data_is_iframe)
+ return STATUS_KEY_FRAME_REQUIRED;
bool is_secure = is_content_encrypted() && drm_bridge() &&
drm_bridge()->IsProtectedSurfaceRequired();
@@ -143,14 +138,10 @@ bool VideoDecoderJob::CreateMediaCodecBridgeInternal() {
surface_.j_surface().obj(), GetMediaCrypto().obj()));
if (!media_codec_bridge_)
- return false;
+ return STATUS_FAILURE;
request_resources_cb_.Run();
- return true;
-}
-
-void VideoDecoderJob::CurrentDataConsumed(bool is_config_change) {
- next_video_data_is_iframe_ = is_config_change;
+ return STATUS_SUCCESS;
}
bool VideoDecoderJob::UpdateOutputFormat() {
@@ -158,7 +149,11 @@ bool VideoDecoderJob::UpdateOutputFormat() {
return false;
int prev_output_width = output_width_;
int prev_output_height = output_height_;
- media_codec_bridge_->GetOutputFormat(&output_width_, &output_height_);
+ // See b/18224769. The values reported from MediaCodecBridge::GetOutputFormat
+ // correspond to the actual video frame size, but this is not necessarily the
+ // size that should be output.
+ output_width_ = config_width_;
+ output_height_ = config_height_;
return (output_width_ != prev_output_width) ||
(output_height_ != prev_output_height);
}
diff --git a/chromium/media/base/android/video_decoder_job.h b/chromium/media/base/android/video_decoder_job.h
index ea99a53373f..36f70d830b9 100644
--- a/chromium/media/base/android/video_decoder_job.h
+++ b/chromium/media/base/android/video_decoder_job.h
@@ -25,41 +25,33 @@ class VideoDecoderJob : public MediaDecoderJob {
const base::Closure& request_data_cb,
const base::Closure& request_resources_cb,
const base::Closure& on_demuxer_config_changed_cb);
- virtual ~VideoDecoderJob();
+ ~VideoDecoderJob() override;
// Passes a java surface object to the codec. Returns true if the surface
// can be used by the decoder, or false otherwise.
bool SetVideoSurface(gfx::ScopedJavaSurface surface);
// MediaDecoderJob implementation.
- virtual bool HasStream() const override;
- virtual void Flush() override;
- virtual void ReleaseDecoderResources() override;
- virtual void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
-
- bool next_video_data_is_iframe() {
- return next_video_data_is_iframe_;
- }
+ bool HasStream() const override;
+ void ReleaseDecoderResources() override;
+ void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
int output_width() const { return output_width_; }
int output_height() const { return output_height_; }
private:
// MediaDecoderJob implementation.
- virtual void ReleaseOutputBuffer(
+ void ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) override;
- virtual bool ComputeTimeToRender() const override;
- virtual bool IsCodecReconfigureNeeded(
- const DemuxerConfigs& configs) const override;
- virtual bool AreDemuxerConfigsChanged(
- const DemuxerConfigs& configs) const override;
- virtual bool CreateMediaCodecBridgeInternal() override;
- virtual void CurrentDataConsumed(bool is_config_change) override;
- virtual bool UpdateOutputFormat() override;
+ bool ComputeTimeToRender() const override;
+ bool IsCodecReconfigureNeeded(const DemuxerConfigs& configs) const override;
+ bool AreDemuxerConfigsChanged(const DemuxerConfigs& configs) const override;
+ MediaDecoderJobStatus CreateMediaCodecBridgeInternal() override;
+ bool UpdateOutputFormat() override;
// Returns true if a protected surface is required for video playback.
bool IsProtectedSurfaceRequired();
@@ -80,11 +72,6 @@ class VideoDecoderJob : public MediaDecoderJob {
base::Closure request_resources_cb_;
base::Closure release_resources_cb_;
- // Track whether the next access unit is an I-frame. The first access
- // unit after Flush() and CurrentDataConsumed(true) is guaranteed to be an
- // I-frame.
- bool next_video_data_is_iframe_;
-
DISALLOW_COPY_AND_ASSIGN(VideoDecoderJob);
};
diff --git a/chromium/media/base/android/webaudio_media_codec_bridge.cc b/chromium/media/base/android/webaudio_media_codec_bridge.cc
index 160614b4765..12861d67ce1 100644
--- a/chromium/media/base/android/webaudio_media_codec_bridge.cc
+++ b/chromium/media/base/android/webaudio_media_codec_bridge.cc
@@ -15,6 +15,7 @@
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/basictypes.h"
+#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/stl_util.h"
@@ -64,13 +65,13 @@ int WebAudioMediaCodecBridge::SaveEncodedAudioToFile(
// Open the file and unlink it, so that it will be actually removed
// when we close the file.
- int fd = open(temporaryFile.c_str(), O_RDWR);
+ base::ScopedFD fd(open(temporaryFile.c_str(), O_RDWR));
if (unlink(temporaryFile.c_str())) {
VLOG(0) << "Couldn't unlink temp file " << temporaryFile
<< ": " << strerror(errno);
}
- if (fd < 0) {
+ if (!fd.is_valid()) {
return -1;
}
@@ -83,15 +84,15 @@ int WebAudioMediaCodecBridge::SaveEncodedAudioToFile(
return -1;
}
- if (static_cast<uint32_t>(write(fd, encoded_data.memory(), data_size_))
+ if (static_cast<uint32_t>(write(fd.get(), encoded_data.memory(), data_size_))
!= data_size_) {
VLOG(0) << "Failed to write all audio data to temp file!";
return -1;
}
- lseek(fd, 0, SEEK_SET);
+ lseek(fd.get(), 0, SEEK_SET);
- return fd;
+ return fd.release();
}
bool WebAudioMediaCodecBridge::DecodeInMemoryAudioFile() {
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
index 5c3e88c87f4..3eff8045637 100644
--- a/chromium/media/base/audio_buffer.cc
+++ b/chromium/media/base/audio_buffer.cc
@@ -51,7 +51,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
return;
if (sample_format == kSampleFormatPlanarF32 ||
- sample_format == kSampleFormatPlanarS16) {
+ sample_format == kSampleFormatPlanarS16 ||
+ sample_format == kSampleFormatPlanarS32) {
// Planar data, so need to allocate buffer for each channel.
// Determine per channel data size, taking into account alignment.
int block_size_per_channel =
@@ -161,9 +162,10 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
kNoTimestamp()));
}
-// Convert int16 values in the range [kint16min, kint16max] to [-1.0, 1.0].
+// Convert int16 values in the range [INT16_MIN, INT16_MAX] to [-1.0, 1.0].
static inline float ConvertS16ToFloat(int16 value) {
- return value * (value < 0 ? -1.0f / kint16min : 1.0f / kint16max);
+ return value * (value < 0 ? -1.0f / std::numeric_limits<int16>::min()
+ : 1.0f / std::numeric_limits<int16>::max());
}
void AudioBuffer::ReadFrames(int frames_to_copy,
@@ -253,6 +255,12 @@ static inline int32 ConvertF32ToS32(float value) {
: value * std::numeric_limits<int32>::max());
}
+// No need for conversion. Return value as is. Keeping function to align with
+// code structure.
+static inline int32 ConvertS32ToS32(int32 value) {
+ return value;
+}
+
template <class Target, typename Converter>
void InterleaveToS32(const std::vector<uint8*>& channel_data,
size_t frames_to_copy,
@@ -321,6 +329,15 @@ void AudioBuffer::ReadFramesInterleavedS32(int frames_to_copy,
dest_data,
ConvertF32ToS32);
break;
+ case kSampleFormatPlanarS32:
+ // Format is planar signed 32 bit. Convert each value into int32 and
+ // insert into output channel data.
+ InterleaveToS32<int32>(channel_data_,
+ frames_to_copy,
+ trim_start_,
+ dest_data,
+ ConvertS32ToS32);
+ break;
case kUnknownSampleFormat:
NOTREACHED();
break;
@@ -364,6 +381,7 @@ void AudioBuffer::TrimRange(int start, int end) {
switch (sample_format_) {
case kSampleFormatPlanarS16:
case kSampleFormatPlanarF32:
+ case kSampleFormatPlanarS32:
// Planar data must be shifted per channel.
for (int ch = 0; ch < channel_count_; ++ch) {
memmove(channel_data_[ch] + (trim_start_ + start) * bytes_per_channel,
diff --git a/chromium/media/base/audio_buffer_unittest.cc b/chromium/media/base/audio_buffer_unittest.cc
index 165161a3ec1..168445a1e19 100644
--- a/chromium/media/base/audio_buffer_unittest.cc
+++ b/chromium/media/base/audio_buffer_unittest.cc
@@ -488,6 +488,16 @@ static scoped_refptr<AudioBuffer> MakeReadFramesInterleavedTestBuffer(
65536.0f / std::numeric_limits<int32>::max(),
frames,
base::TimeDelta::FromSeconds(0));
+ case kSampleFormatPlanarS32:
+ return MakeAudioBuffer<int32>(
+ sample_format,
+ channel_layout,
+ channel_count,
+ sample_rate,
+ 0.0f,
+ 65536.0f / std::numeric_limits<int32>::max(),
+ frames,
+ base::TimeDelta::FromSeconds(0));
case kSampleFormatU8:
case kUnknownSampleFormat:
EXPECT_FALSE(true);
diff --git a/chromium/media/base/audio_bus.cc b/chromium/media/base/audio_bus.cc
index c6236b642b7..719dbcd5571 100644
--- a/chromium/media/base/audio_bus.cc
+++ b/chromium/media/base/audio_bus.cc
@@ -42,8 +42,8 @@ template<class Format, class Fixed, Format Bias>
static void FromInterleavedInternal(const void* src, int start_frame,
int frames, AudioBus* dest,
float min, float max) {
- COMPILE_ASSERT((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
- sizeof(Fixed) > sizeof(Format), invalid_deinterleave_types);
+ static_assert((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
+ sizeof(Fixed) > sizeof(Format), "invalid deinterleave types");
const Format* source = static_cast<const Format*>(src);
const int channels = dest->channels();
for (int ch = 0; ch < channels; ++ch) {
@@ -62,8 +62,8 @@ static void FromInterleavedInternal(const void* src, int start_frame,
template<class Format, class Fixed, Format Bias>
static void ToInterleavedInternal(const AudioBus* source, int start_frame,
int frames, void* dst, Fixed min, Fixed max) {
- COMPILE_ASSERT((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
- sizeof(Fixed) > sizeof(Format), invalid_interleave_types);
+ static_assert((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
+ sizeof(Fixed) > sizeof(Format), "invalid interleave types");
Format* dest = static_cast<Format*>(dst);
const int channels = source->channels();
for (int ch = 0; ch < channels; ++ch) {
diff --git a/chromium/media/base/audio_bus_perftest.cc b/chromium/media/base/audio_bus_perftest.cc
index 0af0235073e..71e81323950 100644
--- a/chromium/media/base/audio_bus_perftest.cc
+++ b/chromium/media/base/audio_bus_perftest.cc
@@ -18,22 +18,22 @@ void RunInterleaveBench(AudioBus* bus, const std::string& trace_name) {
scoped_ptr<T[]> interleaved(new T[frame_size]);
const int byte_size = sizeof(T);
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kBenchmarkIterations; ++i) {
bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
}
double total_time_milliseconds =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ (base::TimeTicks::Now() - start).InMillisecondsF();
perf_test::PrintResult(
"audio_bus_to_interleaved", "", trace_name,
total_time_milliseconds / kBenchmarkIterations, "ms", true);
- start = base::TimeTicks::HighResNow();
+ start = base::TimeTicks::Now();
for (int i = 0; i < kBenchmarkIterations; ++i) {
bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
}
total_time_milliseconds =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ (base::TimeTicks::Now() - start).InMillisecondsF();
perf_test::PrintResult(
"audio_bus_from_interleaved", "", trace_name,
total_time_milliseconds / kBenchmarkIterations, "ms", true);
diff --git a/chromium/media/base/audio_converter.cc b/chromium/media/base/audio_converter.cc
index 194111914e0..be68ac627fa 100644
--- a/chromium/media/base/audio_converter.cc
+++ b/chromium/media/base/audio_converter.cc
@@ -33,7 +33,8 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
CHECK(output_params.IsValid());
// Handle different input and output channel layouts.
- if (input_params.channel_layout() != output_params.channel_layout()) {
+ if (input_params.channel_layout() != output_params.channel_layout() ||
+ input_params.channels() != output_params.channels()) {
DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout()
<< " to " << output_params.channel_layout() << "; from "
<< input_params.channels() << " channels to "
diff --git a/chromium/media/base/audio_converter_perftest.cc b/chromium/media/base/audio_converter_perftest.cc
index c6a475864b7..92bff291308 100644
--- a/chromium/media/base/audio_converter_perftest.cc
+++ b/chromium/media/base/audio_converter_perftest.cc
@@ -39,12 +39,12 @@ void RunConvertBenchmark(const AudioParameters& in_params,
converter.AddInput(&fake_input2);
converter.AddInput(&fake_input3);
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kBenchmarkIterations; ++i) {
converter.Convert(output_bus.get());
}
double runs_per_second = kBenchmarkIterations /
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"audio_converter", "", trace_name, runs_per_second, "runs/s", true);
}
diff --git a/chromium/media/base/audio_converter_unittest.cc b/chromium/media/base/audio_converter_unittest.cc
index b1564db4233..dcc3db69b14 100644
--- a/chromium/media/base/audio_converter_unittest.cc
+++ b/chromium/media/base/audio_converter_unittest.cc
@@ -194,15 +194,17 @@ class AudioConverterTest
};
// Ensure the buffer delay provided by AudioConverter is accurate.
-TEST(AudioConverterTest, AudioDelay) {
+TEST(AudioConverterTest, AudioDelayAndDiscreteChannelCount) {
// Choose input and output parameters such that the transform must make
// multiple calls to fill the buffer.
- AudioParameters input_parameters = AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate,
- kBitsPerChannel, kLowLatencyBufferSize);
- AudioParameters output_parameters = AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate * 2,
- kBitsPerChannel, kHighLatencyBufferSize);
+ AudioParameters input_parameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_DISCRETE, 10, kSampleRate,
+ kBitsPerChannel, kLowLatencyBufferSize,
+ AudioParameters::NO_EFFECTS);
+ AudioParameters output_parameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_DISCRETE, 5, kSampleRate * 2,
+ kBitsPerChannel, kHighLatencyBufferSize,
+ AudioParameters::NO_EFFECTS);
AudioConverter converter(input_parameters, output_parameters, false);
FakeAudioRenderCallback callback(0.2);
@@ -225,6 +227,7 @@ TEST(AudioConverterTest, AudioDelay) {
EXPECT_EQ(expected_last_delay_milliseconds,
callback.last_audio_delay_milliseconds());
+ EXPECT_EQ(input_parameters.channels(), callback.last_channel_count());
}
TEST_P(AudioConverterTest, ArbitraryOutputRequestSize) {
diff --git a/chromium/media/base/audio_decoder_config.cc b/chromium/media/base/audio_decoder_config.cc
index 06a1643f0a1..780bada49e9 100644
--- a/chromium/media/base/audio_decoder_config.cc
+++ b/chromium/media/base/audio_decoder_config.cc
@@ -6,10 +6,8 @@
#include "base/logging.h"
#include "base/metrics/histogram.h"
-#include "base/time/time.h"
#include "media/audio/sample_rates.h"
#include "media/base/limits.h"
-#include "media/base/sample_format.h"
namespace media {
@@ -108,7 +106,7 @@ bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
std::string AudioDecoderConfig::AsHumanReadableString() const {
std::ostringstream s;
- s << "codec: " << codec()
+ s << "codec: " << GetHumanReadableCodecName()
<< " bytes_per_channel: " << bytes_per_channel()
<< " channel_layout: " << channel_layout()
<< " samples_per_second: " << samples_per_second()
@@ -121,4 +119,40 @@ std::string AudioDecoderConfig::AsHumanReadableString() const {
return s.str();
}
+// These names come from src/third_party/ffmpeg/libavcodec/codec_desc.c
+std::string AudioDecoderConfig::GetHumanReadableCodecName() const {
+ switch (codec()) {
+ case kUnknownAudioCodec:
+ return "unknown";
+ case kCodecAAC:
+ return "aac";
+ case kCodecMP3:
+ return "mp3";
+ case kCodecPCM:
+ case kCodecPCM_S16BE:
+ case kCodecPCM_S24BE:
+ return "pcm";
+ case kCodecVorbis:
+ return "vorbis";
+ case kCodecFLAC:
+ return "flac";
+ case kCodecAMR_NB:
+ return "amr_nb";
+ case kCodecAMR_WB:
+ return "amr_wb";
+ case kCodecGSM_MS:
+ return "gsm_ms";
+ case kCodecPCM_ALAW:
+ return "pcm_alaw";
+ case kCodecPCM_MULAW:
+ return "pcm_mulaw";
+ case kCodecOpus:
+ return "opus";
+ case kCodecALAC:
+ return "alac";
+ }
+ NOTREACHED();
+ return "";
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
index c8c7b47d23d..9b2fe96855d 100644
--- a/chromium/media/base/audio_decoder_config.h
+++ b/chromium/media/base/audio_decoder_config.h
@@ -36,13 +36,14 @@ enum AudioCodec {
kCodecOpus = 12,
// kCodecEAC3 = 13,
kCodecPCM_ALAW = 14,
+ kCodecALAC = 15,
// DO NOT ADD RANDOM AUDIO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
// that uses said codec in the same CL.
// Must always be equal to the largest entry ever logged.
- kAudioCodecMax = kCodecPCM_ALAW,
+ kAudioCodecMax = kCodecALAC,
};
// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
@@ -83,6 +84,8 @@ class MEDIA_EXPORT AudioDecoderConfig {
// output only.
std::string AsHumanReadableString() const;
+ std::string GetHumanReadableCodecName() const;
+
AudioCodec codec() const { return codec_; }
int bits_per_channel() const { return bytes_per_channel_ * 8; }
int bytes_per_channel() const { return bytes_per_channel_; }
diff --git a/chromium/media/base/audio_discard_helper.cc b/chromium/media/base/audio_discard_helper.cc
index 8405d0b01b9..f7c645cc20e 100644
--- a/chromium/media/base/audio_discard_helper.cc
+++ b/chromium/media/base/audio_discard_helper.cc
@@ -8,7 +8,6 @@
#include "base/logging.h"
#include "media/base/audio_buffer.h"
-#include "media/base/buffers.h"
namespace media {
diff --git a/chromium/media/base/audio_renderer.h b/chromium/media/base/audio_renderer.h
index 733daa65d7a..586936a1e81 100644
--- a/chromium/media/base/audio_renderer.h
+++ b/chromium/media/base/audio_renderer.h
@@ -8,6 +8,7 @@
#include "base/callback.h"
#include "base/time/time.h"
#include "media/base/buffering_state.h"
+#include "media/base/decryptor.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -24,7 +25,11 @@ class MEDIA_EXPORT AudioRenderer {
virtual ~AudioRenderer();
// Initialize an AudioRenderer with |stream|, executing |init_cb| upon
- // completion.
+ // completion. If initialization fails, only |init_cb| (not |error_cb|) will
+ // be called.
+ //
+ // |set_decryptor_ready_cb| is fired when a Decryptor is needed, i.e. when the
+ // |stream| is encrypted.
//
// |statistics_cb| is executed periodically with audio rendering stats.
//
@@ -33,13 +38,19 @@ class MEDIA_EXPORT AudioRenderer {
//
// |ended_cb| is executed when audio rendering has reached the end of stream.
//
- // |error_cb| is executed if an error was encountered.
- virtual void Initialize(DemuxerStream* stream,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb) = 0;
+ // |error_cb| is executed if an error was encountered after initialization.
+ //
+ // |waiting_for_decryption_key_cb| is called whenever the key needed to
+ // decrypt the stream is not available.
+ virtual void Initialize(
+ DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb) = 0;
// Returns the TimeSource associated with audio rendering.
virtual TimeSource* GetTimeSource() = 0;
diff --git a/chromium/media/base/audio_shifter.cc b/chromium/media/base/audio_shifter.cc
new file mode 100644
index 00000000000..11b7b6fab8f
--- /dev/null
+++ b/chromium/media/base/audio_shifter.cc
@@ -0,0 +1,287 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/bind.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_shifter.h"
+
+namespace media {
+
+// return true if x is between a and b.
+static bool between(double x, double a, double b) {
+ if (b < a)
+ return b <= x && x <= a;
+ return a <= x && x <= b;
+}
+
+class ClockSmoother {
+ public:
+ explicit ClockSmoother(base::TimeDelta clock_accuracy) :
+ clock_accuracy_(clock_accuracy),
+ inaccuracy_delta_(clock_accuracy * 10) {
+ inaccuracies_.push_back(std::make_pair(inaccuracy_sum_, inaccuracy_delta_));
+ }
+
+ base::TimeTicks Smooth(base::TimeTicks t,
+ base::TimeDelta delta) {
+ base::TimeTicks ret = t;
+ if (!previous_.is_null()) {
+ base::TimeDelta actual_delta = t - previous_;
+ base::TimeDelta new_fraction_off = actual_delta - delta;
+ inaccuracy_sum_ += new_fraction_off;
+ inaccuracy_delta_ += actual_delta;
+ inaccuracies_.push_back(std::make_pair(new_fraction_off, actual_delta));
+ if (inaccuracies_.size() > 1000) {
+ inaccuracy_sum_ -= inaccuracies_.front().first;
+ inaccuracy_delta_ -= inaccuracies_.front().second;
+ inaccuracies_.pop_front();
+ }
+ // 0.01 means 1% faster than regular clock.
+ // -0.02 means 2% slower than regular clock.
+ double fraction_off = inaccuracy_sum_.InSecondsF() /
+ inaccuracy_delta_.InSecondsF();
+
+ double delta_seconds = delta.InSecondsF();
+ delta_seconds += delta_seconds * fraction_off;
+ base::TimeTicks expected = previous_ +
+ base::TimeDelta::FromSecondsD(delta_seconds);
+ base::TimeDelta diff = t - expected;
+ if (diff < clock_accuracy_ && diff > -clock_accuracy_) {
+ ret = t + diff / 1000;
+ }
+ }
+ previous_ = ret;
+ return ret;
+ }
+
+ // 1.01 means 1% faster than regular clock.
+ // -0.98 means 2% slower than regular clock.
+ double Rate() const {
+ return 1.0 + inaccuracy_sum_.InSecondsF() /
+ inaccuracy_delta_.InSecondsF();
+ }
+
+ private:
+ base::TimeDelta clock_accuracy_;
+ std::deque<std::pair<base::TimeDelta, base::TimeDelta> > inaccuracies_;
+ base::TimeDelta inaccuracy_sum_;
+ base::TimeDelta inaccuracy_delta_;
+ base::TimeTicks previous_;
+};
+
+AudioShifter::AudioQueueEntry::AudioQueueEntry(
+ base::TimeTicks target_playout_time_,
+ scoped_ptr<AudioBus> audio_) :
+ target_playout_time(target_playout_time_),
+ audio(audio_.release()) {
+}
+
+AudioShifter::AudioQueueEntry::~AudioQueueEntry() {}
+
+AudioShifter::AudioShifter(base::TimeDelta max_buffer_size,
+ base::TimeDelta clock_accuracy,
+ base::TimeDelta adjustment_time,
+ size_t rate,
+ int channels) :
+ max_buffer_size_(max_buffer_size),
+ clock_accuracy_(clock_accuracy),
+ adjustment_time_(adjustment_time),
+ rate_(rate),
+ input_clock_smoother_(new ClockSmoother(clock_accuracy)),
+ output_clock_smoother_(new ClockSmoother(clock_accuracy)),
+ running_(false),
+ position_(0),
+ previous_requested_samples_(0),
+ resampler_(channels, 1.0, 96,
+ base::Bind(&AudioShifter::ResamplerCallback,
+ base::Unretained(this))),
+ current_ratio_(1.0) {
+}
+
+AudioShifter::~AudioShifter() {}
+
+void AudioShifter::Push(scoped_ptr<AudioBus> input,
+ base::TimeTicks playout_time) {
+ if (!queue_.empty()) {
+ playout_time = input_clock_smoother_->Smooth(
+ playout_time,
+ base::TimeDelta::FromSeconds(queue_.back().audio->frames()) / rate_);
+ }
+ queue_.push_back(AudioQueueEntry(playout_time, input.Pass()));
+ while (!queue_.empty() &&
+ queue_.back().target_playout_time -
+ queue_.front().target_playout_time > max_buffer_size_) {
+ DVLOG(1) << "AudioShifter: Audio overflow!";
+ queue_.pop_front();
+ position_ = 0;
+ }
+}
+
+void AudioShifter::Pull(AudioBus* output,
+ base::TimeTicks playout_time) {
+ // Add the kernel size since we incur some internal delay in
+ // resampling. All resamplers incur some delay, and for the
+ // SincResampler (used by MultiChannelResampler), this is
+ // (currently) kKernalSize / 2 frames.
+ playout_time += base::TimeDelta::FromSeconds(
+ SincResampler::kKernelSize) / rate_ / 2;
+ playout_time = output_clock_smoother_->Smooth(
+ playout_time,
+ base::TimeDelta::FromSeconds(previous_requested_samples_) / rate_);
+ previous_requested_samples_ = output->frames();
+
+ base::TimeTicks stream_time;
+ base::TimeTicks buffer_end_time;
+ if (queue_.empty()) {
+ DCHECK_EQ(position_, 0UL);
+ stream_time = end_of_last_consumed_audiobus_;
+ buffer_end_time = end_of_last_consumed_audiobus_;
+ } else {
+ stream_time = queue_.front().target_playout_time;
+ buffer_end_time = queue_.back().target_playout_time;
+ }
+ stream_time += base::TimeDelta::FromSecondsD(
+ (position_ - resampler_.BufferedFrames()) / rate_);
+
+ if (!running_ &&
+ base::TimeDelta::FromSeconds(output->frames() * 2) / rate_ +
+ clock_accuracy_ > buffer_end_time - stream_time) {
+ // We're not running right now, and we don't really have enough data
+ // to satisfy output reliably. Wait.
+ Zero(output);
+ return;
+ }
+ if (playout_time < stream_time -
+ base::TimeDelta::FromSeconds(output->frames()) / rate_ / 2 -
+ (running_ ? clock_accuracy_ : base::TimeDelta())) {
+ // |playout_time| is too far before the earliest known audio sample.
+ Zero(output);
+ return;
+ }
+
+ if (buffer_end_time < playout_time) {
+ // If the "playout_time" is actually capture time, then
+ // the entire queue will be in the past. Since we cannot
+ // play audio in the past. We add one buffer size to the
+ // bias to avoid buffer underruns in the future.
+ if (bias_ == base::TimeDelta()) {
+ bias_ = playout_time - stream_time +
+ clock_accuracy_ +
+ base::TimeDelta::FromSeconds(output->frames()) / rate_;
+ }
+ stream_time += bias_;
+ } else {
+ // Normal case, some part of the queue is
+ // ahead of the scheduled playout time.
+
+ // Skip any data that is simply too old, if we have
+ // better data somewhere in the queue.
+
+ // Reset bias
+ bias_ = base::TimeDelta();
+
+ while (!queue_.empty() &&
+ playout_time - stream_time > clock_accuracy_) {
+ queue_.pop_front();
+ position_ = 0;
+ resampler_.Flush();
+ if (queue_.empty()) {
+ Zero(output);
+ return;
+ }
+ stream_time = queue_.front().target_playout_time;
+ }
+ }
+
+ running_ = true;
+ double steady_ratio = output_clock_smoother_->Rate() /
+ input_clock_smoother_->Rate();
+ double time_difference = (playout_time - stream_time).InSecondsF();
+ double adjustment_time = adjustment_time_.InSecondsF();
+ // This is the ratio we would need to get perfect sync after
+ // |adjustment_time| has passed.
+ double slow_ratio = steady_ratio + time_difference / adjustment_time;
+ slow_ratio = std::max(0.9, std::min(1.1, slow_ratio));
+ adjustment_time = output->frames() / static_cast<double>(rate_);
+ // This is the ratio we'd need to get perfect sync at the end of the
+ // current output audiobus.
+ double fast_ratio = steady_ratio + time_difference / adjustment_time;
+ fast_ratio = std::max(0.9, std::min(1.1, fast_ratio));
+
+ // If the current ratio is somewhere between the slow and the fast
+ // ratio, then keep it. This means we don't have to recalculate the
+ // tables very often and also allows us to converge on good sync faster.
+ if (!between(current_ratio_, slow_ratio, fast_ratio)) {
+ // Check if the direction has changed.
+ if ((current_ratio_ < steady_ratio) == (slow_ratio < steady_ratio)) {
+ // Two possible scenarios:
+ // Either we're really close to perfect sync, but the current ratio
+ // would overshoot, or the current ratio is insufficient to get to
+ // perfect sync in the allotted time. Clamp.
+ double max_ratio = std::max(fast_ratio, slow_ratio);
+ double min_ratio = std::min(fast_ratio, slow_ratio);
+ current_ratio_ = std::min(max_ratio,
+ std::max(min_ratio, current_ratio_));
+ } else {
+ // The "direction" has changed. (From speed up to slow down or
+ // vice versa), so we just take the slow ratio.
+ current_ratio_ = slow_ratio;
+ }
+ resampler_.SetRatio(current_ratio_);
+ }
+ resampler_.Resample(output->frames(), output);
+}
+
+void AudioShifter::ResamplerCallback(int frame_delay, AudioBus* destination) {
+ // TODO(hubbe): Use frame_delay
+ int pos = 0;
+ while (pos < destination->frames() && !queue_.empty()) {
+ size_t to_copy = std::min<size_t>(
+ queue_.front().audio->frames() - position_,
+ destination->frames() - pos);
+ CHECK_GT(to_copy, 0UL);
+ queue_.front().audio->CopyPartialFramesTo(position_,
+ to_copy,
+ pos,
+ destination);
+ pos += to_copy;
+ position_ += to_copy;
+ if (position_ >= static_cast<size_t>(queue_.front().audio->frames())) {
+ end_of_last_consumed_audiobus_ = queue_.front().target_playout_time +
+ base::TimeDelta::FromSeconds(queue_.front().audio->frames()) / rate_;
+ position_ -= queue_.front().audio->frames();
+ queue_.pop_front();
+ }
+ }
+
+ if (pos < destination->frames()) {
+ // Underflow
+ running_ = false;
+ position_ = 0;
+ previous_playout_time_ = base::TimeTicks();
+ bias_ = base::TimeDelta();
+ destination->ZeroFramesPartial(pos, destination->frames() - pos);
+ }
+}
+
+void AudioShifter::Flush() {
+ resampler_.Flush();
+ position_ = 0;
+ queue_.clear();
+ running_ = false;
+ previous_playout_time_ = base::TimeTicks();
+ bias_ = base::TimeDelta();
+}
+
+void AudioShifter::Zero(AudioBus* output) {
+ output->Zero();
+ running_ = false;
+ previous_playout_time_ = base::TimeTicks();
+ bias_ = base::TimeDelta();
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_shifter.h b/chromium/media/base/audio_shifter.h
new file mode 100644
index 00000000000..98239a42f4a
--- /dev/null
+++ b/chromium/media/base/audio_shifter.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_SHIFTER_H
+#define MEDIA_BASE_AUDIO_SHIFTER_H
+
+#include <deque>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/multi_channel_resampler.h"
+
+namespace media {
+
+class AudioBus;
+class ClockSmoother;
+
+// This class works like a buffer between a push based audio source
+// and a pull-based audio sink. The source and sink should operate
+// at nominally the same rate, but since they may run on different
+// hardware clocks, the rate may differ a little. If left unchecked,
+// this difference will first cause lip sync issues between audio
+// and video and eventually it will cause buffer overruns/underruns.
+// This class solves all that by dynamically resampling the audio
+// so that both input and output sources are happy.
+//
+// A note about TimeTicks. The playout_time specified in Push and
+// Pull calls must come from the same timeline. That timeline can
+// be anything you choose as it is never compared to any real-world
+// clocks, but they must come from the same clock. Specifically,
+// specifying samples / rate as the playout time in Push() or Pull()
+// will NOT work.
+//
+class MEDIA_EXPORT AudioShifter {
+ public:
+ // |max_buffer_size| is how much audio we are allowed to buffer.
+ // Often, this can be set fairly large as Push() will limit the
+ // size when it specifies when to play the audio.
+ // |clock_accuracy| is used to determine if a skip has occurred
+ // in the audio (as opposed to an inaccuracy in the timestamp.)
+ // It also limits the smallest amount of buffering allowed.
+ // |adjustment_time| specifies how much time should be used
+ // to adjust the audio. This should normally be at least a few
+ // seconds. The larger the value, the smoother and less audible
+ // the transitions will be. (But it means that perfect audio
+ // sync will take longer to achieve.)
+ // |rate| is audio frames per second, eg 48000.
+ // |channels| is number of channels in input and output audio.
+ // TODO(hubbe): Allow input rate and output rate to be different
+ // since we're going to be resampling anyways.
+ AudioShifter(base::TimeDelta max_buffer_size,
+ base::TimeDelta clock_accuracy,
+ base::TimeDelta adjustment_time,
+ size_t rate,
+ int channels);
+ ~AudioShifter();
+
+ // Push Audio into the shifter. All inputs must have the same number of
+ // channels, but bus size can vary. The playout time can be noisy and
+ // does not have to line up perfectly with the number of samples pushed
+ // so far. However, the playout_time in Push calls and Pull calls must
+ // not diverge over time.
+ // Given audio from a microphone, a reasonable way to calculate
+ // playout_time would be now + 30ms.
+ // Ideally playout_time is some time in the future, in which case
+ // the samples will be buffered until the appropriate time. If
+ // playout_time is in the past, everything will still work, and we'll
+ // try to keep the buffering to a minimum.
+ void Push(scoped_ptr<AudioBus> input, base::TimeTicks playout_time);
+
+ // Fills out |output| with samples. Tries to stretch/shrink the audio
+ // to compensate for drift between input and output.
+ // If called from an output device data pull, a reasonable way to
+ // calculate playout_time would be now + audio pipeline delay.
+ void Pull(AudioBus* output, base::TimeTicks playout_time);
+
+ // Flush audio (but leave timing info)
+ void Flush();
+
+private:
+ void Zero(AudioBus* output);
+ void ResamplerCallback(int frame_delay, AudioBus* destination);
+
+ struct AudioQueueEntry {
+ AudioQueueEntry(base::TimeTicks target_playout_time_,
+ scoped_ptr<AudioBus> audio_);
+ ~AudioQueueEntry();
+ base::TimeTicks target_playout_time;
+ linked_ptr<AudioBus> audio;
+ };
+
+ typedef std::deque<AudioQueueEntry> AudioShifterQueue;
+
+ // Set from constructor.
+ const base::TimeDelta max_buffer_size_;
+ const base::TimeDelta clock_accuracy_;
+ const base::TimeDelta adjustment_time_;
+ const size_t rate_;
+
+ // The clock smoothers are used to smooth out timestamps
+ // and adjust for drift and inaccurate clocks.
+ scoped_ptr<ClockSmoother> input_clock_smoother_;
+ scoped_ptr<ClockSmoother> output_clock_smoother_;
+
+ // Are we currently outputting data?
+ bool running_;
+
+ // Number of frames already consumed from |queue_|.
+ size_t position_;
+
+ // Queue of data provided to us.
+ AudioShifterQueue queue_;
+
+ // Timestamp from last Pull() call.
+ base::TimeTicks previous_playout_time_;
+ // Number of frames requested in last Pull call.
+ size_t previous_requested_samples_;
+
+ // Timestamp at the end of last audio bus
+ // consumed by resampler.
+ base::TimeTicks end_of_last_consumed_audiobus_;
+
+ // If Push() timestamps are in the past, we have to decide the playout delay
+ // ourselves. The delay is then stored here.
+ base::TimeDelta bias_;
+
+ // Resampler.
+ MultiChannelResampler resampler_;
+
+ // Current resampler ratio.
+ double current_ratio_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_SHIFTER_H
diff --git a/chromium/media/base/audio_shifter_unittest.cc b/chromium/media/base/audio_shifter_unittest.cc
new file mode 100644
index 00000000000..e47067e21c4
--- /dev/null
+++ b/chromium/media/base/audio_shifter_unittest.cc
@@ -0,0 +1,209 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <vector>
+
+#include "media/base/audio_bus.h"
+#include "media/base/audio_shifter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+const int kSampleRate = 48000;
+const int kInputPacketSize = 48;
+const int kOutputPacketSize = 24;
+
+#if GTEST_HAS_COMBINE
+
+class AudioShifterTest :
+ public ::testing::TestWithParam<::testing::tuple<int, int, int, bool> > {
+ public:
+ AudioShifterTest()
+ : shifter_(base::TimeDelta::FromMilliseconds(2000),
+ base::TimeDelta::FromMilliseconds(3),
+ base::TimeDelta::FromMilliseconds(100),
+ kSampleRate,
+ 2),
+ end2end_latency_(base::TimeDelta::FromMilliseconds(30)),
+ playback_latency_(base::TimeDelta::FromMilliseconds(10)),
+ tag_input_(false),
+ expect_smooth_output_(true),
+ input_sample_n_(0),
+ output_sample_(0) {
+ }
+
+ void SetupInput(int size, base::TimeDelta rate) {
+ input_size_ = size;
+ input_rate_ = rate;
+ }
+
+ scoped_ptr<AudioBus> CreateTestInput() {
+ scoped_ptr<AudioBus> input(AudioBus::Create(2, input_size_));
+ for (size_t i = 0; i < input_size_; i++) {
+ input->channel(0)[i] = input->channel(1)[i] = input_sample_n_;
+ input_sample_n_++;
+ }
+ if (tag_input_) {
+ input->channel(0)[0] = 10000000.0;
+ tag_input_ = false;
+ expect_smooth_output_ = false;
+ }
+ return input.Pass();
+ }
+
+ void SetupOutput(int size, base::TimeDelta rate) {
+ test_output_ = AudioBus::Create(2, size);
+ output_rate_ = rate;
+ }
+
+ void SetUp() override {
+ SetupInput(
+ kInputPacketSize + ::testing::get<0>(GetParam()) - 1,
+ base::TimeDelta::FromMicroseconds(
+ 1000 + ::testing::get<1>(GetParam()) * 5 - 5));
+ SetupOutput(
+ kOutputPacketSize,
+ base::TimeDelta::FromMicroseconds(
+ 500 + ::testing::get<2>(GetParam()) * 3 - 3));
+ if (::testing::get<3>(GetParam())) {
+ end2end_latency_ = -end2end_latency_;
+ }
+ }
+
+ void Run(size_t loops) {
+ for (size_t i = 0; i < loops;) {
+ if (now_ >= time_to_push_) {
+ shifter_.Push(CreateTestInput(), now_ + end2end_latency_);
+ time_to_push_ += input_rate_;
+ i++;
+ }
+ if (now_ >= time_to_pull_) {
+ shifter_.Pull(test_output_.get(), now_ + playback_latency_);
+ bool silence = true;
+ for (size_t j = 0;
+ j < static_cast<size_t>(test_output_->frames());
+ j++) {
+ if (test_output_->channel(0)[j] != 0.0) {
+ silence = false;
+ if (test_output_->channel(0)[j] > 3000000.0) {
+ marker_outputs_.push_back(
+ now_ + playback_latency_ +
+ base::TimeDelta::FromSeconds(j) / kSampleRate);
+ } else {
+ // We don't expect smooth output once we insert a tag,
+ // or in the very beginning.
+ if (expect_smooth_output_ && output_sample_ > 500.0) {
+ EXPECT_GT(test_output_->channel(0)[j], output_sample_ - 3)
+ << "j = " << j;
+ if (test_output_->channel(0)[j] >
+ output_sample_ + kOutputPacketSize / 2) {
+ skip_outputs_.push_back(now_ + playback_latency_);
+ }
+ }
+ output_sample_ = test_output_->channel(0)[j];
+ }
+ }
+ }
+ if (silence) {
+ silent_outputs_.push_back(now_);
+ }
+ time_to_pull_ += output_rate_;
+ }
+ now_ += std::min(time_to_push_ - now_,
+ time_to_pull_ - now_);
+ }
+ }
+
+ void RunAndCheckSync(size_t loops) {
+ Run(100);
+ size_t expected_silent_outputs = silent_outputs_.size();
+ Run(loops);
+ tag_input_ = true;
+ CHECK(marker_outputs_.empty());
+ base::TimeTicks expected_mark_time = time_to_push_ + end2end_latency_;
+ Run(100);
+ if (end2end_latency_ > base::TimeDelta()) {
+ CHECK(!marker_outputs_.empty());
+ base::TimeDelta actual_offset = marker_outputs_[0] - expected_mark_time;
+ EXPECT_LT(actual_offset, base::TimeDelta::FromMicroseconds(100));
+ EXPECT_GT(actual_offset, base::TimeDelta::FromMicroseconds(-100));
+ } else {
+ EXPECT_GT(marker_outputs_.size(), 0UL);
+ }
+ EXPECT_EQ(expected_silent_outputs, silent_outputs_.size());
+ }
+
+ protected:
+ AudioShifter shifter_;
+ base::TimeDelta input_rate_;
+ base::TimeDelta output_rate_;
+ base::TimeDelta end2end_latency_;
+ base::TimeDelta playback_latency_;
+ base::TimeTicks time_to_push_;
+ base::TimeTicks time_to_pull_;
+ base::TimeTicks now_;
+ scoped_ptr<AudioBus> test_input_;
+ scoped_ptr<AudioBus> test_output_;
+ std::vector<base::TimeTicks> silent_outputs_;
+ std::vector<base::TimeTicks> skip_outputs_;
+ std::vector<base::TimeTicks> marker_outputs_;
+ size_t input_size_;
+ bool tag_input_;
+ bool expect_smooth_output_;
+ size_t input_sample_n_;
+ double output_sample_;
+};
+
+TEST_P(AudioShifterTest, TestSync) {
+ RunAndCheckSync(1000);
+ EXPECT_EQ(0UL, skip_outputs_.size());
+}
+
+TEST_P(AudioShifterTest, TestSyncWithPush) {
+ // Push some extra audio.
+ shifter_.Push(CreateTestInput().Pass(), now_ - base::TimeDelta(input_rate_));
+ RunAndCheckSync(1000);
+ EXPECT_LE(skip_outputs_.size(), 2UL);
+}
+
+TEST_P(AudioShifterTest, TestSyncWithPull) {
+ // Output should smooth out eventually, but that is not tested yet.
+ expect_smooth_output_ = false;
+ Run(100);
+ for (int i = 0; i < 100; i++) {
+ shifter_.Pull(test_output_.get(),
+ now_ + base::TimeDelta::FromMilliseconds(i));
+ }
+ RunAndCheckSync(1000);
+ EXPECT_LE(skip_outputs_.size(), 1UL);
+}
+
+TEST_P(AudioShifterTest, UnderOverFlow) {
+ expect_smooth_output_ = false;
+ SetupInput(
+ kInputPacketSize + ::testing::get<0>(GetParam()) * 10 - 10,
+ base::TimeDelta::FromMicroseconds(
+ 1000 + ::testing::get<1>(GetParam()) * 100 - 100));
+ SetupOutput(
+ kOutputPacketSize,
+ base::TimeDelta::FromMicroseconds(
+ 500 + ::testing::get<2>(GetParam()) * 50 - 50));
+ // Sane output is not expected, but let's make sure we don't crash.
+ Run(1000);
+}
+
+// Note: First argument is optional and intentionally left blank.
+// (it's a prefix for the generated test cases)
+INSTANTIATE_TEST_CASE_P(
+ ,
+ AudioShifterTest,
+ ::testing::Combine(::testing::Range(0, 3),
+ ::testing::Range(0, 3),
+ ::testing::Range(0, 3),
+ ::testing::Bool()));
+
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/audio_splicer_unittest.cc b/chromium/media/base/audio_splicer_unittest.cc
index 0d2c6062a51..d64302c7627 100644
--- a/chromium/media/base/audio_splicer_unittest.cc
+++ b/chromium/media/base/audio_splicer_unittest.cc
@@ -15,7 +15,7 @@ namespace media {
// Do not change this format. AddInput() and GetValue() only work with float.
static const SampleFormat kSampleFormat = kSampleFormatF32;
-COMPILE_ASSERT(kSampleFormat == kSampleFormatF32, invalid_splice_format);
+static_assert(kSampleFormat == kSampleFormatF32, "invalid splice format");
static const int kChannels = 1;
static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
diff --git a/chromium/media/base/bind_to_current_loop.h b/chromium/media/base/bind_to_current_loop.h
index 6461b1c9af9..c9eda2ac3f8 100644
--- a/chromium/media/base/bind_to_current_loop.h
+++ b/chromium/media/base/bind_to_current_loop.h
@@ -1,9 +1,4 @@
-// This file was GENERATED by command:
-// pump.py bind_to_current_loop.h.pump
-// DO NOT EDIT BY HAND!!!
-
-
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -14,6 +9,7 @@
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
// This is a helper utility for base::Bind()ing callbacks to the current
// MessageLoop. The typical use is when |a| (of class |A|) wants to hand a
@@ -44,117 +40,29 @@ template <typename T>
base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
ScopedVector<T>& p) { return base::Passed(&p); }
-template <typename T> struct TrampolineHelper;
-
-template <>
-struct TrampolineHelper<void()> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void()>& cb) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb));
- }
-};
-
+// First, tell the compiler TrampolineHelper is a struct template with one
+// type parameter. Then define specializations where the type is a function
+// returning void and taking zero or more arguments.
+template <typename Sig> struct TrampolineHelper;
-template <typename A1>
-struct TrampolineHelper<void(A1)> {
+template <typename... Args>
+struct TrampolineHelper<void(Args...)> {
static void Run(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1)>& cb, A1 a1) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1)));
+ const base::Callback<void(Args...)>& cb,
+ Args... args) {
+ task_runner->PostTask(FROM_HERE,
+ base::Bind(cb, TrampolineForward(args)...));
}
};
-
-template <typename A1, typename A2>
-struct TrampolineHelper<void(A1, A2)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2)>& cb, A1 a1, A2 a2) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3>
-struct TrampolineHelper<void(A1, A2, A3)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2, A3)>& cb, A1 a1, A2 a2, A3 a3) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2),
- internal::TrampolineForward(a3)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4>
-struct TrampolineHelper<void(A1, A2, A3, A4)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2, A3, A4)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2),
- internal::TrampolineForward(a3), internal::TrampolineForward(a4)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2, A3, A4, A5)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4, A5 a5) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2),
- internal::TrampolineForward(a3), internal::TrampolineForward(a4),
- internal::TrampolineForward(a5)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2, A3, A4, A5, A6)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2),
- internal::TrampolineForward(a3), internal::TrampolineForward(a4),
- internal::TrampolineForward(a5), internal::TrampolineForward(a6)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6, typename A7>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6, A7)> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void(A1, A2, A3, A4, A5, A6, A7)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb,
- internal::TrampolineForward(a1), internal::TrampolineForward(a2),
- internal::TrampolineForward(a3), internal::TrampolineForward(a4),
- internal::TrampolineForward(a5), internal::TrampolineForward(a6),
- internal::TrampolineForward(a7)));
- }
-};
-
-
} // namespace internal
template<typename T>
static base::Callback<T> BindToCurrentLoop(
const base::Callback<T>& cb) {
return base::Bind(&internal::TrampolineHelper<T>::Run,
- base::MessageLoopProxy::current(), cb);
+ base::ThreadTaskRunnerHandle::Get(), cb);
}
} // namespace media
diff --git a/chromium/media/base/bind_to_current_loop.h.pump b/chromium/media/base/bind_to_current_loop.h.pump
deleted file mode 100644
index 4db40f1c1f9..00000000000
--- a/chromium/media/base/bind_to_current_loop.h.pump
+++ /dev/null
@@ -1,86 +0,0 @@
-$$ This is a pump file for generating file templates. Pump is a python
-$$ script that is part of the Google Test suite of utilities. Description
-$$ can be found here:
-$$
-$$ http://code.google.com/p/googletest/wiki/PumpManual
-$$
-
-$$ See comment for MAX_ARITY in base/bind.h.pump.
-$var MAX_ARITY = 7
-
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
-#define MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/single_thread_task_runner.h"
-
-// This is a helper utility for base::Bind()ing callbacks to the current
-// MessageLoop. The typical use is when |a| (of class |A|) wants to hand a
-// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
-// when |b| executes the callback, it does so on |a|'s current MessageLoop.
-//
-// Typical usage: request to be called back on the current thread:
-// other->StartAsyncProcessAndCallMeBack(
-// media::BindToCurrentLoop(base::Bind(&MyClass::MyMethod, this)));
-//
-// Note that like base::Bind(), BindToCurrentLoop() can't bind non-constant
-// references, and that *unlike* base::Bind(), BindToCurrentLoop() makes copies
-// of its arguments, and thus can't be used with arrays.
-
-namespace media {
-
-// Mimic base::internal::CallbackForward, replacing p.Pass() with
-// base::Passed(&p) to account for the extra layer of indirection.
-namespace internal {
-template <typename T>
-T& TrampolineForward(T& t) { return t; }
-
-template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr<T, R> > TrampolineForward(
- scoped_ptr<T, R>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
- ScopedVector<T>& p) { return base::Passed(&p); }
-
-template <typename T> struct TrampolineHelper;
-
-$range ARITY 0..MAX_ARITY
-$for ARITY [[
-$range ARG 1..ARITY
-
-template <$for ARG , [[typename A$(ARG)]]>
-struct TrampolineHelper<void($for ARG , [[A$(ARG)]])> {
- static void Run(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Callback<void($for ARG , [[A$(ARG)]])>& cb
-$if ARITY != 0 [[, ]]
-$for ARG , [[A$(ARG) a$(ARG)]]
-) {
- task_runner->PostTask(FROM_HERE, base::Bind(cb
-$if ARITY != 0 [[, ]]
-$for ARG , [[internal::TrampolineForward(a$(ARG))]]));
- }
-};
-
-
-]] $$ for ARITY
-
-} // namespace internal
-
-template<typename T>
-static base::Callback<T> BindToCurrentLoop(
- const base::Callback<T>& cb) {
- return base::Bind(&internal::TrampolineHelper<T>::Run,
- base::MessageLoopProxy::current(), cb);
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
diff --git a/chromium/media/base/browser_cdm.h b/chromium/media/base/browser_cdm.h
index f009779324d..f4902d8708b 100644
--- a/chromium/media/base/browser_cdm.h
+++ b/chromium/media/base/browser_cdm.h
@@ -12,44 +12,9 @@
namespace media {
// Interface for browser side CDMs.
-class MEDIA_EXPORT BrowserCdm : public PlayerTracker {
+class MEDIA_EXPORT BrowserCdm : public MediaKeys, public PlayerTracker {
public:
- // TODO(jrummell): Update this to actually derive from MediaKeys
- // (Use web_session_id rather than session_id).
- typedef base::Callback<
- void(uint32 session_id, const std::string& web_session_id)>
- SessionCreatedCB;
-
- typedef base::Callback<void(uint32 session_id,
- const std::vector<uint8>& message,
- const GURL& destination_url)> SessionMessageCB;
-
- typedef base::Callback<void(uint32 session_id)> SessionReadyCB;
-
- typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
-
- typedef base::Callback<void(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- uint32 system_code)> SessionErrorCB;
-
- virtual ~BrowserCdm();
-
- // MediaKeys-like implementation.
- virtual bool CreateSession(uint32 session_id,
- const std::string& content_type,
- const uint8* init_data,
- int init_data_length) = 0;
- virtual void LoadSession(uint32 session_id,
- const std::string& web_session_id) = 0;
- virtual void UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) = 0;
- virtual void ReleaseSession(uint32 session_id) = 0;
-
- // PlayerTracker implementation.
- virtual int RegisterPlayer(const base::Closure& new_key_cb,
- const base::Closure& cdm_unset_cb) = 0;
- virtual void UnregisterPlayer(int registration_id) = 0;
+ ~BrowserCdm() override;
protected:
BrowserCdm();
diff --git a/chromium/media/base/browser_cdm_factory.cc b/chromium/media/base/browser_cdm_factory.cc
new file mode 100644
index 00000000000..88445d890b7
--- /dev/null
+++ b/chromium/media/base/browser_cdm_factory.cc
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/browser_cdm_factory.h"
+
+#include "base/logging.h"
+
+#if defined(OS_ANDROID)
+#include "media/base/android/browser_cdm_factory_android.h"
+#endif
+
+namespace media {
+
+namespace {
+BrowserCdmFactory* g_cdm_factory = NULL;
+}
+
+void SetBrowserCdmFactory(BrowserCdmFactory* factory) {
+ DCHECK(!g_cdm_factory);
+ g_cdm_factory = factory;
+}
+
+scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ const std::string& key_system,
+ bool use_hw_secure_codecs,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb) {
+ if (!g_cdm_factory) {
+#if defined(OS_ANDROID)
+ SetBrowserCdmFactory(new BrowserCdmFactoryAndroid);
+#else
+ LOG(ERROR) << "Cannot create BrowserCdm: no BrowserCdmFactory available!";
+ return scoped_ptr<BrowserCdm>();
+#endif
+ }
+
+ return g_cdm_factory->CreateBrowserCdm(
+ key_system, use_hw_secure_codecs, session_message_cb, session_closed_cb,
+ legacy_session_error_cb, session_keys_change_cb,
+ session_expiration_update_cb);
+}
+
+} // namespace media
diff --git a/chromium/media/base/browser_cdm_factory.h b/chromium/media/base/browser_cdm_factory.h
index e6fa47bcaf4..2ccf1b4108a 100644
--- a/chromium/media/base/browser_cdm_factory.h
+++ b/chromium/media/base/browser_cdm_factory.h
@@ -7,22 +7,48 @@
#include <string>
+#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/browser_cdm.h"
#include "media/base/media_export.h"
namespace media {
+class MEDIA_EXPORT BrowserCdmFactory {
+ public:
+ BrowserCdmFactory() {}
+ virtual ~BrowserCdmFactory() {}
+
+ virtual scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ const std::string& key_system,
+ bool use_hw_secure_codecs,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BrowserCdmFactory);
+};
+
+// Provides a factory for creating BrowserCdm instances. There is only one
+// BrowserCdmFactory per process.
+void SetBrowserCdmFactory(BrowserCdmFactory* factory);
+
// Creates a BrowserCdm for |key_system|. Returns NULL if the CDM cannot be
// created.
+// |use_hw_secure_codecs| indicates that the CDM should be configured to use
+// hardware-secure codecs (for platforms that support it).
// TODO(xhwang): Add ifdef for IPC based CDM.
scoped_ptr<BrowserCdm> MEDIA_EXPORT
- CreateBrowserCdm(const std::string& key_system,
- const BrowserCdm::SessionCreatedCB& session_created_cb,
- const BrowserCdm::SessionMessageCB& session_message_cb,
- const BrowserCdm::SessionReadyCB& session_ready_cb,
- const BrowserCdm::SessionClosedCB& session_closed_cb,
- const BrowserCdm::SessionErrorCB& session_error_cb);
+CreateBrowserCdm(const std::string& key_system,
+ bool use_hw_secure_codecs,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb);
} // namespace media
diff --git a/chromium/media/base/cdm_callback_promise.cc b/chromium/media/base/cdm_callback_promise.cc
index 7b952ab768c..a198f45b262 100644
--- a/chromium/media/base/cdm_callback_promise.cc
+++ b/chromium/media/base/cdm_callback_promise.cc
@@ -38,6 +38,5 @@ void CdmCallbackPromise<T...>::reject(MediaKeys::Exception exception_code,
// Explicit template instantiation for the Promises needed.
template class MEDIA_EXPORT CdmCallbackPromise<>;
template class MEDIA_EXPORT CdmCallbackPromise<std::string>;
-template class MEDIA_EXPORT CdmCallbackPromise<KeyIdsVector>;
} // namespace media
diff --git a/chromium/media/base/cdm_config.h b/chromium/media/base/cdm_config.h
new file mode 100644
index 00000000000..4b57ebb96c1
--- /dev/null
+++ b/chromium/media/base/cdm_config.h
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CDM_CONFIG_H_
+#define MEDIA_BASE_CDM_CONFIG_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// The runtime configuration for new CDM instances as computed by
+// |requestMediaKeySystemAccess|. This is in some sense the Chromium-side
+// counterpart of Blink's WebMediaKeySystemConfiguration.
+struct MEDIA_EXPORT CdmConfig {
+ // Allow access to a distinctive identifier.
+ bool allow_distinctive_identifier = false;
+
+ // Allow access to persistent state.
+ bool allow_persistent_state = false;
+
+ // Use hardware-secure codecs. This flag is only used on Android, it should
+ // always be false on other platforms.
+ bool use_hw_secure_codecs = false;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CDM_CONFIG_H_
diff --git a/chromium/media/base/cdm_context.cc b/chromium/media/base/cdm_context.cc
new file mode 100644
index 00000000000..ec2b633b3a3
--- /dev/null
+++ b/chromium/media/base/cdm_context.cc
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/cdm_context.h"
+
+namespace media {
+
+CdmContext::CdmContext() {}
+
+CdmContext::~CdmContext() {}
+
+void IgnoreCdmAttached(bool success) {
+}
+
+} // namespace media
diff --git a/chromium/media/base/cdm_context.h b/chromium/media/base/cdm_context.h
new file mode 100644
index 00000000000..a0861c4e5db
--- /dev/null
+++ b/chromium/media/base/cdm_context.h
@@ -0,0 +1,53 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CDM_CONTEXT_H_
+#define MEDIA_BASE_CDM_CONTEXT_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class Decryptor;
+
+// An interface representing the context that a media pipeline needs from a
+// content decryption module (CDM) to decrypt (and decode) encrypted buffers.
+class MEDIA_EXPORT CdmContext {
+ public:
+ // Indicates an invalid CDM ID. See GetCdmId() for details.
+ static const int kInvalidCdmId = 0;
+
+ virtual ~CdmContext();
+
+ // Gets the Decryptor object associated with the CDM. Returns NULL if the CDM
+ // does not support a Decryptor. The returned object is only guaranteed to be
+ // valid during the CDM's lifetime.
+ virtual Decryptor* GetDecryptor() = 0;
+
+ // Returns an ID associated with the CDM, which can be used to locate the real
+ // CDM instance. This is useful when the CDM is hosted remotely, e.g. in a
+ // different process.
+ // Returns kInvalidCdmId if the CDM cannot be used remotely. In this case,
+ // GetDecryptor() should return a non-null Decryptor.
+ virtual int GetCdmId() const = 0;
+
+ protected:
+ CdmContext();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CdmContext);
+};
+
+// Callback to notify that the CdmContext has been completely attached to
+// the media pipeline. Parameter indicates whether the operation succeeded.
+typedef base::Callback<void(bool)> CdmAttachedCB;
+
+// A dummy implementation of CdmAttachedCB.
+MEDIA_EXPORT void IgnoreCdmAttached(bool success);
+
+} // namespace media
+
+#endif // MEDIA_BASE_CDM_CONTEXT_H_
diff --git a/chromium/media/base/cdm_factory.h b/chromium/media/base/cdm_factory.h
index 570c042db65..4a5cfb7f880 100644
--- a/chromium/media/base/cdm_factory.h
+++ b/chromium/media/base/cdm_factory.h
@@ -15,20 +15,30 @@ class GURL;
namespace media {
+// Callback used when CDM is created. |error_message| only used if
+// MediaKeys is null (i.e. CDM can't be created).
+using CdmCreatedCB = base::Callback<void(scoped_ptr<MediaKeys>,
+ const std::string& error_message)>;
+
+struct CdmConfig;
+
class MEDIA_EXPORT CdmFactory {
public:
CdmFactory();
virtual ~CdmFactory();
- virtual scoped_ptr<MediaKeys> Create(
+ // Creates a CDM for |key_system| and returns it through |cdm_created_cb|
+ // asynchronously.
+ virtual void Create(
const std::string& key_system,
const GURL& security_origin,
+ const CdmConfig& cdm_config,
const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
const SessionKeysChangeCB& session_keys_change_cb,
- const SessionExpirationUpdateCB& session_expiration_update_cb) = 0;
+ const SessionExpirationUpdateCB& session_expiration_update_cb,
+ const CdmCreatedCB& cdm_created_cb) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(CdmFactory);
diff --git a/chromium/media/base/cdm_key_information.cc b/chromium/media/base/cdm_key_information.cc
new file mode 100644
index 00000000000..ebaf8b135ca
--- /dev/null
+++ b/chromium/media/base/cdm_key_information.cc
@@ -0,0 +1,16 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/cdm_key_information.h"
+
+namespace media {
+
+CdmKeyInformation::CdmKeyInformation()
+ : status(INTERNAL_ERROR), system_code(0) {
+}
+
+CdmKeyInformation::~CdmKeyInformation() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/cdm_key_information.h b/chromium/media/base/cdm_key_information.h
new file mode 100644
index 00000000000..4f9d8e08db7
--- /dev/null
+++ b/chromium/media/base/cdm_key_information.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CDM_KEY_INFORMATION_H_
+#define MEDIA_BASE_CDM_KEY_INFORMATION_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+struct MEDIA_EXPORT CdmKeyInformation {
+ enum KeyStatus {
+ USABLE = 0,
+ INTERNAL_ERROR = 1,
+ EXPIRED = 2,
+ OUTPUT_NOT_ALLOWED = 3,
+ OUTPUT_DOWNSCALED = 4,
+ KEY_STATUS_PENDING = 5,
+ KEY_STATUS_MAX = KEY_STATUS_PENDING
+ };
+
+ CdmKeyInformation();
+ ~CdmKeyInformation();
+
+ std::vector<uint8> key_id;
+ KeyStatus status;
+ uint32 system_code;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CDM_KEY_INFORMATION_H_
diff --git a/chromium/media/base/cdm_promise.h b/chromium/media/base/cdm_promise.h
index df8a7521c43..93515477dad 100644
--- a/chromium/media/base/cdm_promise.h
+++ b/chromium/media/base/cdm_promise.h
@@ -57,8 +57,6 @@ class MEDIA_EXPORT CdmPromise {
// For some reason the Windows compiler is not happy with the implementation
// of CdmPromiseTemplate being in the .cc file, so moving it here.
-namespace {
-
template <typename... T>
struct CdmPromiseTraits {};
@@ -72,14 +70,6 @@ struct CdmPromiseTraits<std::string> {
static const CdmPromise::ResolveParameterType kType = CdmPromise::STRING_TYPE;
};
-template <>
-struct CdmPromiseTraits<KeyIdsVector> {
- static const CdmPromise::ResolveParameterType kType =
- CdmPromise::KEY_IDS_VECTOR_TYPE;
-};
-
-} // namespace
-
// This class adds the resolve(T) method. This class is still an interface, and
// is used as the type of promise that gets passed around.
template <typename... T>
@@ -96,7 +86,7 @@ class MEDIA_EXPORT CdmPromiseTemplate : public CdmPromise {
uint32 system_code,
const std::string& error_message) = 0;
- virtual ResolveParameterType GetResolveParameterType() const override {
+ ResolveParameterType GetResolveParameterType() const override {
return CdmPromiseTraits<T...>::kType;
}
diff --git a/chromium/media/base/cdm_promise_adapter.cc b/chromium/media/base/cdm_promise_adapter.cc
new file mode 100644
index 00000000000..70993b580ec
--- /dev/null
+++ b/chromium/media/base/cdm_promise_adapter.cc
@@ -0,0 +1,78 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/cdm_promise_adapter.h"
+
+#include "media/base/media_keys.h"
+
+namespace media {
+
+CdmPromiseAdapter::CdmPromiseAdapter() : next_promise_id_(1) {
+}
+
+CdmPromiseAdapter::~CdmPromiseAdapter() {
+ DCHECK(promises_.empty());
+ Clear();
+}
+
+uint32_t CdmPromiseAdapter::SavePromise(scoped_ptr<CdmPromise> promise) {
+ uint32_t promise_id = next_promise_id_++;
+ promises_.add(promise_id, promise.Pass());
+ return promise_id;
+}
+
+template <typename... T>
+void CdmPromiseAdapter::ResolvePromise(uint32_t promise_id,
+ const T&... result) {
+ scoped_ptr<CdmPromise> promise = TakePromise(promise_id);
+ if (!promise) {
+ NOTREACHED() << "Promise not found for " << promise_id;
+ return;
+ }
+
+ // Sanity check the type before we do static_cast.
+ CdmPromise::ResolveParameterType type = promise->GetResolveParameterType();
+ CdmPromise::ResolveParameterType expected = CdmPromiseTraits<T...>::kType;
+ if (type != expected) {
+ NOTREACHED() << "Promise type mismatch: " << type << " vs " << expected;
+ return;
+ }
+
+ static_cast<CdmPromiseTemplate<T...>*>(promise.get())->resolve(result...);
+}
+
+void CdmPromiseAdapter::RejectPromise(uint32_t promise_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ scoped_ptr<CdmPromise> promise = TakePromise(promise_id);
+ if (!promise) {
+ NOTREACHED() << "No promise found for promise_id " << promise_id;
+ return;
+ }
+
+ promise->reject(exception_code, system_code, error_message);
+}
+
+void CdmPromiseAdapter::Clear() {
+ // Reject all outstanding promises.
+ for (auto& promise : promises_)
+ promise.second->reject(MediaKeys::UNKNOWN_ERROR, 0, "Operation aborted.");
+ promises_.clear();
+}
+
+scoped_ptr<CdmPromise> CdmPromiseAdapter::TakePromise(uint32_t promise_id) {
+ PromiseMap::iterator it = promises_.find(promise_id);
+ if (it == promises_.end())
+ return nullptr;
+ return promises_.take_and_erase(it);
+}
+
+// Explicit instantiation of function templates.
+template MEDIA_EXPORT void CdmPromiseAdapter::ResolvePromise(uint32_t);
+template MEDIA_EXPORT void CdmPromiseAdapter::ResolvePromise(
+ uint32_t,
+ const std::string&);
+
+} // namespace media
diff --git a/chromium/media/base/cdm_promise_adapter.h b/chromium/media/base/cdm_promise_adapter.h
new file mode 100644
index 00000000000..b078c0dd0a5
--- /dev/null
+++ b/chromium/media/base/cdm_promise_adapter.h
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CDM_PROMISE_ADAPTER_H_
+#define MEDIA_BASE_CDM_PROMISE_ADAPTER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Helps convert CdmPromises to an integer identifier and vice versa. The
+// integer identifier is needed where we cannot pass CdmPromises through, such
+// as PPAPI, IPC and JNI.
+class MEDIA_EXPORT CdmPromiseAdapter {
+ public:
+ CdmPromiseAdapter();
+ ~CdmPromiseAdapter();
+
+ // Takes ownership of |promise| and returns an integer promise ID.
+ uint32_t SavePromise(scoped_ptr<media::CdmPromise> promise);
+
+ // Takes the promise for |promise_id|, sanity checks its |type|, and resolves
+ // it with |result|.
+ template <typename... T>
+ void ResolvePromise(uint32_t promise_id, const T&... result);
+
+ // Takes the promise for |promise_id| and rejects it with |exception_code|,
+ // |system_code| and |error_message|.
+ void RejectPromise(uint32_t promise_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
+
+ // Rejects and clears all |promises_|.
+ void Clear();
+
+ private:
+ // A map between promise IDs and CdmPromises. It owns the CdmPromises.
+ typedef base::ScopedPtrHashMap<uint32_t, scoped_ptr<CdmPromise>> PromiseMap;
+
+ // Finds, takes the ownership of and returns the promise for |promise_id|.
+ // Returns null if no promise can be found.
+ scoped_ptr<CdmPromise> TakePromise(uint32_t promise_id);
+
+ uint32_t next_promise_id_;
+ PromiseMap promises_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmPromiseAdapter);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CDM_PROMISE_ADAPTER_H_
diff --git a/chromium/media/base/channel_layout.h b/chromium/media/base/channel_layout.h
index b57f1529640..d44a746a61f 100644
--- a/chromium/media/base/channel_layout.h
+++ b/chromium/media/base/channel_layout.h
@@ -39,22 +39,22 @@ enum ChannelLayout {
// Front L, Front R, Front C, Side L, Side R
CHANNEL_LAYOUT_5_0 = 9,
- // Front L, Front R, Front C, Side L, Side R, LFE
+ // Front L, Front R, Front C, LFE, Side L, Side R
CHANNEL_LAYOUT_5_1 = 10,
// Front L, Front R, Front C, Back L, Back R
CHANNEL_LAYOUT_5_0_BACK = 11,
- // Front L, Front R, Front C, Back L, Back R, LFE
+ // Front L, Front R, Front C, LFE, Back L, Back R
CHANNEL_LAYOUT_5_1_BACK = 12,
// Front L, Front R, Front C, Side L, Side R, Back L, Back R
CHANNEL_LAYOUT_7_0 = 13,
- // Front L, Front R, Front C, Side L, Side R, LFE, Back L, Back R
+ // Front L, Front R, Front C, LFE, Side L, Side R, Back L, Back R
CHANNEL_LAYOUT_7_1 = 14,
- // Front L, Front R, Front C, Side L, Side R, LFE, Front LofC, Front RofC
+ // Front L, Front R, Front C, LFE, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE = 15,
// Stereo L, Stereo R
@@ -75,13 +75,13 @@ enum ChannelLayout {
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_6_0_FRONT = 21,
- // Stereo L, Stereo R, Side L, Side R, Front C, Rear C.
+ // Stereo L, Stereo R, Front C, Rear L, Rear R, Rear C
CHANNEL_LAYOUT_HEXAGONAL = 22,
- // Stereo L, Stereo R, Side L, Side R, Front C, Rear Center, LFE
+ // Stereo L, Stereo R, Front C, LFE, Side L, Side R, Rear Center
CHANNEL_LAYOUT_6_1 = 23,
- // Stereo L, Stereo R, Back L, Back R, Front C, Rear Center, LFE
+ // Stereo L, Stereo R, Front C, LFE, Back L, Back R, Rear Center
CHANNEL_LAYOUT_6_1_BACK = 24,
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
@@ -90,10 +90,10 @@ enum ChannelLayout {
// Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_0_FRONT = 26,
- // Front L, Front R, Front C, Back L, Back R, LFE, Front LofC, Front RofC
+ // Front L, Front R, Front C, LFE, Back L, Back R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,
- // Front L, Front R, Front C, Side L, Side R, Rear C, Back L, Back R.
+ // Front L, Front R, Front C, Side L, Side R, Rear L, Back R, Back C.
CHANNEL_LAYOUT_OCTAGONAL = 28,
// Channels are not explicitly mapped to speakers.
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
index 07d64eb0359..89b4eeedc44 100644
--- a/chromium/media/base/channel_mixer_unittest.cc
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -54,7 +54,7 @@ TEST(ChannelMixerTest, ConstructAllPossibleLayouts) {
struct ChannelMixerTestData {
ChannelMixerTestData(ChannelLayout input_layout, ChannelLayout output_layout,
- float* channel_values, int num_channel_values,
+ const float* channel_values, int num_channel_values,
float scale)
: input_layout(input_layout),
output_layout(output_layout),
@@ -67,7 +67,7 @@ struct ChannelMixerTestData {
ChannelMixerTestData(ChannelLayout input_layout, int input_channels,
ChannelLayout output_layout, int output_channels,
- float* channel_values, int num_channel_values)
+ const float* channel_values, int num_channel_values)
: input_layout(input_layout),
input_channels(input_channels),
output_layout(output_layout),
@@ -87,7 +87,7 @@ struct ChannelMixerTestData {
int input_channels;
ChannelLayout output_layout;
int output_channels;
- float* channel_values;
+ const float* channel_values;
int num_channel_values;
float scale;
};
diff --git a/chromium/media/base/container_names.cc b/chromium/media/base/container_names.cc
index 7b188b6b04c..48523d11ee7 100644
--- a/chromium/media/base/container_names.cc
+++ b/chromium/media/base/container_names.cc
@@ -7,7 +7,6 @@
#include <cctype>
#include <limits>
-#include "base/basictypes.h"
#include "base/logging.h"
#include "media/base/bit_reader.h"
@@ -334,7 +333,7 @@ static bool CheckDV(const uint8* buffer, int buffer_size) {
int offset = 0;
int current_sequence_number = -1;
- int last_block_number[6];
+ int last_block_number[6] = {0};
while (offset + 11 < buffer_size) {
BitReader reader(buffer + offset, 11);
diff --git a/chromium/media/base/data_buffer.cc b/chromium/media/base/data_buffer.cc
index d0b40eed1a5..9d6afaf8d2f 100644
--- a/chromium/media/base/data_buffer.cc
+++ b/chromium/media/base/data_buffer.cc
@@ -4,7 +4,6 @@
#include "media/base/data_buffer.h"
-#include "base/logging.h"
namespace media {
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
index 673610b6874..a8624b1e8e7 100644
--- a/chromium/media/base/decoder_buffer.cc
+++ b/chromium/media/base/decoder_buffer.cc
@@ -4,22 +4,32 @@
#include "media/base/decoder_buffer.h"
-#include "base/logging.h"
-#include "media/base/buffers.h"
-#include "media/base/decrypt_config.h"
namespace media {
+// Allocates a block of memory which is padded for use with the SIMD
+// optimizations used by FFmpeg.
+static uint8* AllocateFFmpegSafeBlock(int size) {
+ uint8* const block = reinterpret_cast<uint8*>(base::AlignedAlloc(
+ size + DecoderBuffer::kPaddingSize, DecoderBuffer::kAlignmentSize));
+ memset(block + size, 0, DecoderBuffer::kPaddingSize);
+ return block;
+}
+
DecoderBuffer::DecoderBuffer(int size)
: size_(size),
- side_data_size_(0) {
+ side_data_size_(0),
+ is_key_frame_(false) {
Initialize();
}
-DecoderBuffer::DecoderBuffer(const uint8* data, int size,
- const uint8* side_data, int side_data_size)
+DecoderBuffer::DecoderBuffer(const uint8* data,
+ int size,
+ const uint8* side_data,
+ int side_data_size)
: size_(size),
- side_data_size_(side_data_size) {
+ side_data_size_(side_data_size),
+ is_key_frame_(false) {
if (!data) {
CHECK_EQ(size_, 0);
CHECK(!side_data);
@@ -27,23 +37,26 @@ DecoderBuffer::DecoderBuffer(const uint8* data, int size,
}
Initialize();
+
+ DCHECK_GE(size_, 0);
memcpy(data_.get(), data, size_);
- if (side_data)
- memcpy(side_data_.get(), side_data, side_data_size_);
+
+ if (!side_data) {
+ CHECK_EQ(side_data_size, 0);
+ return;
+ }
+
+ DCHECK_GT(side_data_size_, 0);
+ memcpy(side_data_.get(), side_data, side_data_size_);
}
DecoderBuffer::~DecoderBuffer() {}
void DecoderBuffer::Initialize() {
CHECK_GE(size_, 0);
- data_.reset(reinterpret_cast<uint8*>(
- base::AlignedAlloc(size_ + kPaddingSize, kAlignmentSize)));
- memset(data_.get() + size_, 0, kPaddingSize);
- if (side_data_size_ > 0) {
- side_data_.reset(reinterpret_cast<uint8*>(
- base::AlignedAlloc(side_data_size_ + kPaddingSize, kAlignmentSize)));
- memset(side_data_.get() + side_data_size_, 0, kPaddingSize);
- }
+ data_.reset(AllocateFFmpegSafeBlock(size_));
+ if (side_data_size_ > 0)
+ side_data_.reset(AllocateFFmpegSafeBlock(side_data_size_));
splice_timestamp_ = kNoTimestamp();
}
@@ -82,6 +95,7 @@ std::string DecoderBuffer::AsHumanReadableString() {
<< " duration: " << duration_.InMicroseconds()
<< " size: " << size_
<< " side_data_size: " << side_data_size_
+ << " is_key_frame: " << is_key_frame_
<< " encrypted: " << (decrypt_config_ != NULL)
<< " discard_padding (ms): (" << discard_padding_.first.InMilliseconds()
<< ", " << discard_padding_.second.InMilliseconds() << ")";
@@ -93,4 +107,16 @@ void DecoderBuffer::set_timestamp(base::TimeDelta timestamp) {
timestamp_ = timestamp;
}
+void DecoderBuffer::CopySideDataFrom(const uint8* side_data,
+ int side_data_size) {
+ if (side_data_size > 0) {
+ side_data_size_ = side_data_size;
+ side_data_.reset(AllocateFFmpegSafeBlock(side_data_size_));
+ memcpy(side_data_.get(), side_data, side_data_size_);
+ } else {
+ side_data_.reset();
+ side_data_size_ = 0;
+ }
+}
+
} // namespace media
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 092b2130c50..665313ddcc1 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -42,16 +42,18 @@ class MEDIA_EXPORT DecoderBuffer
};
// Allocates buffer with |size| >= 0. Buffer will be padded and aligned
- // as necessary.
+ // as necessary, and |is_key_frame_| will default to false.
explicit DecoderBuffer(int size);
// Create a DecoderBuffer whose |data_| is copied from |data|. Buffer will be
// padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
+ // The buffer's |is_key_frame_| will default to false.
static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);
// Create a DecoderBuffer whose |data_| is copied from |data| and |side_data_|
// is copied from |side_data|. Buffers will be padded and aligned as necessary
- // Data pointers must not be NULL and sizes must be >= 0.
+ // Data pointers must not be NULL and sizes must be >= 0. The buffer's
+ // |is_key_frame_| will default to false.
static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size,
const uint8* side_data,
int side_data_size);
@@ -154,15 +156,29 @@ class MEDIA_EXPORT DecoderBuffer
splice_timestamp_ = splice_timestamp;
}
+ bool is_key_frame() const {
+ DCHECK(!end_of_stream());
+ return is_key_frame_;
+ }
+
+ void set_is_key_frame(bool is_key_frame) {
+ DCHECK(!end_of_stream());
+ is_key_frame_ = is_key_frame;
+ }
+
// Returns a human-readable string describing |*this|.
std::string AsHumanReadableString();
+ // Replaces any existing side data with data copied from |side_data|.
+ void CopySideDataFrom(const uint8* side_data, int side_data_size);
+
protected:
friend class base::RefCountedThreadSafe<DecoderBuffer>;
// Allocates a buffer of size |size| >= 0 and copies |data| into it. Buffer
// will be padded and aligned as necessary. If |data| is NULL then |data_| is
- // set to NULL and |buffer_size_| to 0.
+ // set to NULL and |buffer_size_| to 0. |is_key_frame_| will default to
+ // false.
DecoderBuffer(const uint8* data, int size,
const uint8* side_data, int side_data_size);
virtual ~DecoderBuffer();
@@ -178,6 +194,7 @@ class MEDIA_EXPORT DecoderBuffer
scoped_ptr<DecryptConfig> decrypt_config_;
DiscardPadding discard_padding_;
base::TimeDelta splice_timestamp_;
+ bool is_key_frame_;
// Constructor helper method for memory allocations.
void Initialize();
diff --git a/chromium/media/base/decoder_buffer_unittest.cc b/chromium/media/base/decoder_buffer_unittest.cc
index c5a03b78450..c868a362314 100644
--- a/chromium/media/base/decoder_buffer_unittest.cc
+++ b/chromium/media/base/decoder_buffer_unittest.cc
@@ -13,6 +13,7 @@ TEST(DecoderBufferTest, Constructors) {
EXPECT_TRUE(buffer->data());
EXPECT_EQ(0, buffer->data_size());
EXPECT_FALSE(buffer->end_of_stream());
+ EXPECT_FALSE(buffer->is_key_frame());
const int kTestSize = 10;
scoped_refptr<DecoderBuffer> buffer3(new DecoderBuffer(kTestSize));
@@ -28,6 +29,7 @@ TEST(DecoderBufferTest, CreateEOSBuffer) {
TEST(DecoderBufferTest, CopyFrom) {
const uint8 kData[] = "hello";
const int kDataSize = arraysize(kData);
+
scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(&kData), kDataSize));
ASSERT_TRUE(buffer2.get());
@@ -35,6 +37,8 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_EQ(buffer2->data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer2->data(), kData, kDataSize));
EXPECT_FALSE(buffer2->end_of_stream());
+ EXPECT_FALSE(buffer2->is_key_frame());
+
scoped_refptr<DecoderBuffer> buffer3(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(&kData), kDataSize,
reinterpret_cast<const uint8*>(&kData), kDataSize));
@@ -46,6 +50,7 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_EQ(buffer3->side_data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer3->side_data(), kData, kDataSize));
EXPECT_FALSE(buffer3->end_of_stream());
+ EXPECT_FALSE(buffer3->is_key_frame());
}
#if !defined(OS_ANDROID)
@@ -72,6 +77,8 @@ TEST(DecoderBufferTest, PaddingAlignment) {
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
buffer2->data()) & (DecoderBuffer::kAlignmentSize - 1));
+
+ EXPECT_FALSE(buffer2->is_key_frame());
}
#endif
@@ -92,9 +99,34 @@ TEST(DecoderBufferTest, ReadingWriting) {
EXPECT_FALSE(buffer->end_of_stream());
}
-TEST(DecoderBufferTest, GetDecryptConfig) {
+TEST(DecoderBufferTest, DecryptConfig) {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
EXPECT_FALSE(buffer->decrypt_config());
+
+ const char kKeyId[] = "key id";
+ const char kIv[] = "0123456789abcdef";
+ std::vector<SubsampleEntry> subsamples;
+ subsamples.push_back(SubsampleEntry(10, 5));
+ subsamples.push_back(SubsampleEntry(15, 7));
+
+ DecryptConfig decrypt_config(kKeyId, kIv, subsamples);
+
+ buffer->set_decrypt_config(
+ make_scoped_ptr(new DecryptConfig(kKeyId, kIv, subsamples)));
+
+ EXPECT_TRUE(buffer->decrypt_config());
+ EXPECT_TRUE(buffer->decrypt_config()->Matches(decrypt_config));
+}
+
+TEST(DecoderBufferTest, IsKeyFrame) {
+ scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
+ EXPECT_FALSE(buffer->is_key_frame());
+
+ buffer->set_is_key_frame(false);
+ EXPECT_FALSE(buffer->is_key_frame());
+
+ buffer->set_is_key_frame(true);
+ EXPECT_TRUE(buffer->is_key_frame());
}
} // namespace media
diff --git a/chromium/media/base/decrypt_config.cc b/chromium/media/base/decrypt_config.cc
index a47806504a7..7df9216ed98 100644
--- a/chromium/media/base/decrypt_config.cc
+++ b/chromium/media/base/decrypt_config.cc
@@ -21,4 +21,20 @@ DecryptConfig::DecryptConfig(const std::string& key_id,
DecryptConfig::~DecryptConfig() {}
+bool DecryptConfig::Matches(const DecryptConfig& config) const {
+ if (key_id() != config.key_id() || iv() != config.iv() ||
+ subsamples().size() != config.subsamples().size()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < subsamples().size(); ++i) {
+ if ((subsamples()[i].clear_bytes != config.subsamples()[i].clear_bytes) ||
+ (subsamples()[i].cypher_bytes != config.subsamples()[i].cypher_bytes)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
} // namespace media
diff --git a/chromium/media/base/decrypt_config.h b/chromium/media/base/decrypt_config.h
index 86480aa9eed..febd0fe8669 100644
--- a/chromium/media/base/decrypt_config.h
+++ b/chromium/media/base/decrypt_config.h
@@ -24,6 +24,9 @@ namespace media {
// result, and then copying each byte from the decrypted block over the
// position of the corresponding encrypted byte.
struct SubsampleEntry {
+ SubsampleEntry() : clear_bytes(0), cypher_bytes(0) {}
+ SubsampleEntry(uint32 clear_bytes, uint32 cypher_bytes)
+ : clear_bytes(clear_bytes), cypher_bytes(cypher_bytes) {}
uint32 clear_bytes;
uint32 cypher_bytes;
};
@@ -50,6 +53,9 @@ class MEDIA_EXPORT DecryptConfig {
const std::string& iv() const { return iv_; }
const std::vector<SubsampleEntry>& subsamples() const { return subsamples_; }
+ // Returns true if all fields in |config| match this config.
+ bool Matches(const DecryptConfig& config) const;
+
private:
const std::string key_id_;
diff --git a/chromium/media/base/decryptor.h b/chromium/media/base/decryptor.h
index 292f2b34694..47b4645efd9 100644
--- a/chromium/media/base/decryptor.h
+++ b/chromium/media/base/decryptor.h
@@ -69,6 +69,9 @@ class MEDIA_EXPORT Decryptor {
// decrypted buffer must be NULL.
// - This parameter should not be set to kNeedMoreData.
// Second parameter: The decrypted buffer.
+ // - Only |data|, |data_size| and |timestamp| are set in the returned
+ // DecoderBuffer. The callback handler is responsible for setting other
+ // fields as appropriate.
typedef base::Callback<void(Status,
const scoped_refptr<DecoderBuffer>&)> DecryptCB;
@@ -102,8 +105,7 @@ class MEDIA_EXPORT Decryptor {
const DecoderInitCB& init_cb) = 0;
// Helper structure for managing multiple decoded audio buffers per input.
- // TODO(xhwang): Rename this to AudioFrames.
- typedef std::list<scoped_refptr<AudioBuffer> > AudioBuffers;
+ typedef std::list<scoped_refptr<AudioBuffer> > AudioFrames;
// Indicates completion of audio/video decrypt-and-decode operation.
//
@@ -121,7 +123,7 @@ class MEDIA_EXPORT Decryptor {
// - Set to kError if unexpected error has occurred. In this case the
// returned frame(s) must be NULL/empty.
// Second parameter: The decoded video frame or audio buffers.
- typedef base::Callback<void(Status, const AudioBuffers&)> AudioDecodeCB;
+ typedef base::Callback<void(Status, const AudioFrames&)> AudioDecodeCB;
typedef base::Callback<void(Status,
const scoped_refptr<VideoFrame>&)> VideoDecodeCB;
diff --git a/chromium/media/base/demuxer.h b/chromium/media/base/demuxer.h
index 4658107b88e..3075b595765 100644
--- a/chromium/media/base/demuxer.h
+++ b/chromium/media/base/demuxer.h
@@ -11,6 +11,7 @@
#include "media/base/data_source.h"
#include "media/base/demuxer_stream.h"
#include "media/base/demuxer_stream_provider.h"
+#include "media/base/eme_constants.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -48,8 +49,9 @@ class MEDIA_EXPORT Demuxer : public DemuxerStreamProvider {
// A new potentially encrypted stream has been parsed.
// First parameter - The type of initialization data.
// Second parameter - The initialization data associated with the stream.
- typedef base::Callback<void(const std::string& type,
- const std::vector<uint8>& init_data)> NeedKeyCB;
+ typedef base::Callback<void(EmeInitDataType type,
+ const std::vector<uint8>& init_data)>
+ EncryptedMediaInitDataCB;
Demuxer();
~Demuxer() override;
@@ -57,7 +59,8 @@ class MEDIA_EXPORT Demuxer : public DemuxerStreamProvider {
// Completes initialization of the demuxer.
//
// The demuxer does not own |host| as it is guaranteed to outlive the
- // lifetime of the demuxer. Don't delete it!
+ // lifetime of the demuxer. Don't delete it! |status_cb| must only be run
+ // after this method has returned.
virtual void Initialize(DemuxerHost* host,
const PipelineStatusCB& status_cb,
bool enable_text_tracks) = 0;
diff --git a/chromium/media/base/demuxer_perftest.cc b/chromium/media/base/demuxer_perftest.cc
index 339cfe136a2..9708e584222 100644
--- a/chromium/media/base/demuxer_perftest.cc
+++ b/chromium/media/base/demuxer_perftest.cc
@@ -43,8 +43,8 @@ static void QuitLoopWithStatus(base::MessageLoop* message_loop,
message_loop->PostTask(FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}
-static void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data) {
+static void OnEncryptedMediaInitData(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data) {
VLOG(0) << "File is encrypted.";
}
@@ -175,11 +175,10 @@ static void RunDemuxerBenchmark(const std::string& filename) {
FileDataSource data_source;
ASSERT_TRUE(data_source.Initialize(file_path));
- Demuxer::NeedKeyCB need_key_cb = base::Bind(&NeedKey);
- FFmpegDemuxer demuxer(message_loop.message_loop_proxy(),
- &data_source,
- need_key_cb,
- new MediaLog());
+ Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb =
+ base::Bind(&OnEncryptedMediaInitData);
+ FFmpegDemuxer demuxer(message_loop.message_loop_proxy(), &data_source,
+ encrypted_media_init_data_cb, new MediaLog());
demuxer.Initialize(&demuxer_host,
base::Bind(&QuitLoopWithStatus, &message_loop),
@@ -188,11 +187,11 @@ static void RunDemuxerBenchmark(const std::string& filename) {
StreamReader stream_reader(&demuxer, false);
// Benchmark.
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
while (!stream_reader.IsDone()) {
stream_reader.Read();
}
- base::TimeTicks end = base::TimeTicks::HighResNow();
+ base::TimeTicks end = base::TimeTicks::Now();
total_time += (end - start).InSecondsF();
demuxer.Stop();
QuitLoopWithStatus(&message_loop, PIPELINE_OK);
diff --git a/chromium/media/base/demuxer_stream.cc b/chromium/media/base/demuxer_stream.cc
index e55a1638674..fff5ddf2b16 100644
--- a/chromium/media/base/demuxer_stream.cc
+++ b/chromium/media/base/demuxer_stream.cc
@@ -8,6 +8,12 @@ namespace media {
DemuxerStream::~DemuxerStream() {}
+// Most DemuxerStream implementations don't specify liveness. Returns unknown
+// liveness by default.
+DemuxerStream::Liveness DemuxerStream::liveness() const {
+ return DemuxerStream::LIVENESS_UNKNOWN;
+}
+
// Most DemuxerStream implementations don't need to convert bit stream.
// Do nothing by default.
void DemuxerStream::EnableBitstreamConverter() {}
diff --git a/chromium/media/base/demuxer_stream.h b/chromium/media/base/demuxer_stream.h
index fd590d37440..596ab67e836 100644
--- a/chromium/media/base/demuxer_stream.h
+++ b/chromium/media/base/demuxer_stream.h
@@ -26,6 +26,12 @@ class MEDIA_EXPORT DemuxerStream {
NUM_TYPES, // Always keep this entry as the last one!
};
+ enum Liveness {
+ LIVENESS_UNKNOWN,
+ LIVENESS_RECORDED,
+ LIVENESS_LIVE,
+ };
+
// Status returned in the Read() callback.
// kOk : Indicates the second parameter is Non-NULL and contains media data
// or the end of the stream.
@@ -58,16 +64,18 @@ class MEDIA_EXPORT DemuxerStream {
const scoped_refptr<DecoderBuffer>&)>ReadCB;
virtual void Read(const ReadCB& read_cb) = 0;
- // Returns the audio decoder configuration. It is an error to call this method
- // if type() != AUDIO.
+ // Returns the audio/video decoder configuration. It is an error to call the
+ // audio method on a video stream and vice versa. After |kConfigChanged| is
+ // returned in a Read(), the caller should call this method again to retrieve
+ // the new config.
virtual AudioDecoderConfig audio_decoder_config() = 0;
-
- // Returns the video decoder configuration. It is an error to call this method
- // if type() != VIDEO.
virtual VideoDecoderConfig video_decoder_config() = 0;
// Returns the type of stream.
- virtual Type type() = 0;
+ virtual Type type() const = 0;
+
+ // Returns liveness of the streams provided, i.e. whether recorded or live.
+ virtual Liveness liveness() const;
virtual void EnableBitstreamConverter();
diff --git a/chromium/media/base/demuxer_stream_provider.h b/chromium/media/base/demuxer_stream_provider.h
index 83082fc375d..1d270928fc5 100644
--- a/chromium/media/base/demuxer_stream_provider.h
+++ b/chromium/media/base/demuxer_stream_provider.h
@@ -12,12 +12,6 @@ namespace media {
class MEDIA_EXPORT DemuxerStreamProvider {
public:
- enum Liveness {
- LIVENESS_UNKNOWN,
- LIVENESS_RECORDED,
- LIVENESS_LIVE,
- };
-
DemuxerStreamProvider();
virtual ~DemuxerStreamProvider();
@@ -25,9 +19,6 @@ class MEDIA_EXPORT DemuxerStreamProvider {
// to be DemuxerStream::TEXT), or NULL if that type of stream is not present.
virtual DemuxerStream* GetStream(DemuxerStream::Type type) = 0;
- // Returns liveness of the streams provided, i.e. whether recorded or live.
- virtual Liveness GetLiveness() const = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(DemuxerStreamProvider);
};
diff --git a/chromium/media/base/eme_constants.h b/chromium/media/base/eme_constants.h
index 27ebfffa4c2..ba49c7d6a72 100644
--- a/chromium/media/base/eme_constants.h
+++ b/chromium/media/base/eme_constants.h
@@ -9,17 +9,23 @@
namespace media {
-// Defines bitmask values that specify registered initialization data types used
+// Defines values that specify registered Initialization Data Types used
// in Encrypted Media Extensions (EME).
-// The mask values are stored in a SupportedInitDataTypes.
-enum EmeInitDataType {
- EME_INIT_DATA_TYPE_NONE = 0,
- EME_INIT_DATA_TYPE_WEBM = 1 << 0,
-#if defined(USE_PROPRIETARY_CODECS)
- EME_INIT_DATA_TYPE_CENC = 1 << 1,
-#endif // defined(USE_PROPRIETARY_CODECS)
+// http://w3c.github.io/encrypted-media/initdata-format-registry.html#registry
+// The mask values are stored in a InitDataTypeMask.
+enum class EmeInitDataType {
+ UNKNOWN,
+ WEBM,
+ CENC,
+ KEYIDS
};
+typedef uint32_t InitDataTypeMask;
+const InitDataTypeMask kInitDataTypeMaskNone = 0;
+const InitDataTypeMask kInitDataTypeMaskWebM = 1 << 0;
+const InitDataTypeMask kInitDataTypeMaskCenc = 1 << 1;
+const InitDataTypeMask kInitDataTypeMaskKeyIds = 1 << 2;
+
// Defines bitmask values that specify codecs used in Encrypted Media Extension
// (EME). Each value represents a codec within a specific container.
// The mask values are stored in a SupportedCodecs.
@@ -27,27 +33,114 @@ enum EmeCodec {
// *_ALL values should only be used for masking, do not use them to specify
// codec support because they may be extended to include more codecs.
EME_CODEC_NONE = 0,
- EME_CODEC_WEBM_VORBIS = 1 << 0,
- EME_CODEC_WEBM_AUDIO_ALL = EME_CODEC_WEBM_VORBIS,
- EME_CODEC_WEBM_VP8 = 1 << 1,
- EME_CODEC_WEBM_VP9 = 1 << 2,
+ EME_CODEC_WEBM_OPUS = 1 << 0,
+ EME_CODEC_WEBM_VORBIS = 1 << 1,
+ EME_CODEC_WEBM_AUDIO_ALL = EME_CODEC_WEBM_OPUS | EME_CODEC_WEBM_VORBIS,
+ EME_CODEC_WEBM_VP8 = 1 << 2,
+ EME_CODEC_WEBM_VP9 = 1 << 3,
EME_CODEC_WEBM_VIDEO_ALL = (EME_CODEC_WEBM_VP8 | EME_CODEC_WEBM_VP9),
EME_CODEC_WEBM_ALL = (EME_CODEC_WEBM_AUDIO_ALL | EME_CODEC_WEBM_VIDEO_ALL),
#if defined(USE_PROPRIETARY_CODECS)
- EME_CODEC_MP4_AAC = 1 << 3,
+ EME_CODEC_MP4_AAC = 1 << 4,
EME_CODEC_MP4_AUDIO_ALL = EME_CODEC_MP4_AAC,
- EME_CODEC_MP4_AVC1 = 1 << 4,
+ EME_CODEC_MP4_AVC1 = 1 << 5,
EME_CODEC_MP4_VIDEO_ALL = EME_CODEC_MP4_AVC1,
EME_CODEC_MP4_ALL = (EME_CODEC_MP4_AUDIO_ALL | EME_CODEC_MP4_VIDEO_ALL),
+ EME_CODEC_AUDIO_ALL = (EME_CODEC_WEBM_AUDIO_ALL | EME_CODEC_MP4_AUDIO_ALL),
+ EME_CODEC_VIDEO_ALL = (EME_CODEC_WEBM_VIDEO_ALL | EME_CODEC_MP4_VIDEO_ALL),
EME_CODEC_ALL = (EME_CODEC_WEBM_ALL | EME_CODEC_MP4_ALL),
#else
+ EME_CODEC_AUDIO_ALL = EME_CODEC_WEBM_AUDIO_ALL,
+ EME_CODEC_VIDEO_ALL = EME_CODEC_WEBM_VIDEO_ALL,
EME_CODEC_ALL = EME_CODEC_WEBM_ALL,
#endif // defined(USE_PROPRIETARY_CODECS)
};
-typedef uint32_t SupportedInitDataTypes;
typedef uint32_t SupportedCodecs;
+enum class EmeSessionTypeSupport {
+ // Invalid default value.
+ INVALID,
+ // The session type is not supported.
+ NOT_SUPPORTED,
+ // The session type is supported if a distinctive identifier is available.
+ SUPPORTED_WITH_IDENTIFIER,
+ // The session type is always supported.
+ SUPPORTED,
+};
+
+// Used to declare support for distinctive identifier and persistent state.
+// These are purposefully limited to not allow one to require the other, so that
+// transitive requirements are not possible. Non-trivial refactoring would be
+// required to support transitive requirements.
+enum class EmeFeatureSupport {
+ // Invalid default value.
+ INVALID,
+ // Access to the feature is not supported at all.
+ NOT_SUPPORTED,
+ // Access to the feature may be requested.
+ REQUESTABLE,
+ // Access to the feature cannot be blocked.
+ ALWAYS_ENABLED,
+};
+
+enum class EmeMediaType {
+ AUDIO,
+ VIDEO,
+};
+
+// Robustness values understood by KeySystems.
+// Note: key_systems.cc expects this ordering in GetRobustnessConfigRule(),
+// make sure to correct that code if this list changes.
+enum class EmeRobustness {
+ INVALID,
+ EMPTY,
+ SW_SECURE_CRYPTO,
+ SW_SECURE_DECODE,
+ HW_SECURE_CRYPTO,
+ HW_SECURE_DECODE,
+ HW_SECURE_ALL,
+};
+
+// Configuration rules indicate the configuration state required to support a
+// configuration option (note: a configuration option may be disallowing a
+// feature). Configuration rules are used to answer queries about distinctive
+// identifier, persistent state, and robustness requirements, as well as to
+// describe support for different session types.
+//
+// If in the future there are reasons to request user permission other than
+// access to a distinctive identifier, then additional rules should be added.
+// Rules are implemented in ConfigState and are otherwise opaque.
+enum class EmeConfigRule {
+ // The configuration option is not supported.
+ NOT_SUPPORTED,
+ // The configuration option prevents use of a distinctive identifier.
+ IDENTIFIER_NOT_ALLOWED,
+ // The configuration option is supported if a distinctive identifier is
+ // available.
+ IDENTIFIER_REQUIRED,
+ // The configuration option is supported, but the user experience may be
+ // improved if a distinctive identifier is available.
+ IDENTIFIER_RECOMMENDED,
+ // The configuration option prevents use of persistent state.
+ PERSISTENCE_NOT_ALLOWED,
+ // The configuration option is supported if persistent state is available.
+ PERSISTENCE_REQUIRED,
+ // The configuration option is supported if both a distinctive identifier and
+ // persistent state are available.
+ IDENTIFIER_AND_PERSISTENCE_REQUIRED,
+ // The configuration option prevents use of hardware-secure codecs.
+ // This rule only has meaning on platforms that distinguish hardware-secure
+ // codecs (ie. Android).
+ HW_SECURE_CODECS_NOT_ALLOWED,
+ // The configuration option is supported if hardware-secure codecs are used.
+ // This rule only has meaning on platforms that distinguish hardware-secure
+ // codecs (ie. Android).
+ HW_SECURE_CODECS_REQUIRED,
+ // The configuration option is supported without conditions.
+ SUPPORTED,
+};
+
} // namespace media
#endif // MEDIA_BASE_EME_CONSTANTS_H_
diff --git a/chromium/media/base/fake_audio_render_callback.cc b/chromium/media/base/fake_audio_render_callback.cc
index 5a0979e9ea0..73d606ec3fb 100644
--- a/chromium/media/base/fake_audio_render_callback.cc
+++ b/chromium/media/base/fake_audio_render_callback.cc
@@ -15,6 +15,7 @@ FakeAudioRenderCallback::FakeAudioRenderCallback(double step)
: half_fill_(false),
step_(step),
last_audio_delay_milliseconds_(-1),
+ last_channel_count_(-1),
volume_(1) {
reset();
}
@@ -24,6 +25,8 @@ FakeAudioRenderCallback::~FakeAudioRenderCallback() {}
int FakeAudioRenderCallback::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
last_audio_delay_milliseconds_ = audio_delay_milliseconds;
+ last_channel_count_ = audio_bus->channels();
+
int number_of_frames = audio_bus->frames();
if (half_fill_)
number_of_frames /= 2;
diff --git a/chromium/media/base/fake_audio_render_callback.h b/chromium/media/base/fake_audio_render_callback.h
index 695365f98a7..65d7df2366b 100644
--- a/chromium/media/base/fake_audio_render_callback.h
+++ b/chromium/media/base/fake_audio_render_callback.h
@@ -22,17 +22,16 @@ class FakeAudioRenderCallback
// where x = [|number_of_frames| * m, |number_of_frames| * (m + 1)] and m =
// the number of Render() calls fulfilled thus far.
explicit FakeAudioRenderCallback(double step);
- virtual ~FakeAudioRenderCallback();
+ ~FakeAudioRenderCallback() override;
// Renders a sine wave into the provided audio data buffer. If |half_fill_|
// is set, will only fill half the buffer.
- virtual int Render(AudioBus* audio_bus,
- int audio_delay_milliseconds) override;
+ int Render(AudioBus* audio_bus, int audio_delay_milliseconds) override;
MOCK_METHOD0(OnRenderError, void());
// AudioTransform::ProvideAudioTransformInput implementation.
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) override;
+ double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) override;
// Toggles only filling half the requested amount during Render().
void set_half_fill(bool half_fill) { half_fill_ = half_fill; }
@@ -42,16 +41,21 @@ class FakeAudioRenderCallback
// Returns the last |audio_delay_milliseconds| provided to Render() or -1 if
// no Render() call occurred.
- int last_audio_delay_milliseconds() { return last_audio_delay_milliseconds_; }
+ int last_audio_delay_milliseconds() const {
+ return last_audio_delay_milliseconds_;
+ }
// Set volume information used by ProvideAudioTransformInput().
void set_volume(double volume) { volume_ = volume; }
+ int last_channel_count() const { return last_channel_count_; }
+
private:
bool half_fill_;
double x_;
double step_;
int last_audio_delay_milliseconds_;
+ int last_channel_count_;
double volume_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioRenderCallback);
diff --git a/chromium/media/filters/fake_demuxer_stream.cc b/chromium/media/base/fake_demuxer_stream.cc
index 6be6d0a2dcd..f0fbe9b5057 100644
--- a/chromium/media/filters/fake_demuxer_stream.cc
+++ b/chromium/media/base/fake_demuxer_stream.cc
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/fake_demuxer_stream.h"
+#include "media/base/fake_demuxer_stream.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_helpers.h"
#include "media/base/video_frame.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -33,7 +34,7 @@ const uint8 kIv[] = {
FakeDemuxerStream::FakeDemuxerStream(int num_configs,
int num_buffers_in_one_config,
bool is_encrypted)
- : task_runner_(base::MessageLoopProxy::current()),
+ : task_runner_(base::ThreadTaskRunnerHandle::Get()),
num_configs_(num_configs),
num_buffers_in_one_config_(num_buffers_in_one_config),
config_changes_(num_configs > 1),
@@ -84,7 +85,7 @@ VideoDecoderConfig FakeDemuxerStream::video_decoder_config() {
}
// TODO(xhwang): Support audio if needed.
-DemuxerStream::Type FakeDemuxerStream::type() {
+DemuxerStream::Type FakeDemuxerStream::type() const {
DCHECK(task_runner_->BelongsToCurrentThread());
return VIDEO;
}
@@ -192,4 +193,22 @@ void FakeDemuxerStream::DoRead() {
base::ResetAndReturn(&read_cb_).Run(kOk, buffer);
}
+FakeDemuxerStreamProvider::FakeDemuxerStreamProvider(
+ int num_video_configs,
+ int num_video_buffers_in_one_config,
+ bool is_video_encrypted)
+ : fake_video_stream_(num_video_configs,
+ num_video_buffers_in_one_config,
+ is_video_encrypted) {
+}
+
+FakeDemuxerStreamProvider::~FakeDemuxerStreamProvider() {
+}
+
+DemuxerStream* FakeDemuxerStreamProvider::GetStream(DemuxerStream::Type type) {
+ if (type == DemuxerStream::Type::AUDIO)
+ return nullptr;
+ return &fake_video_stream_;
+};
+
} // namespace media
diff --git a/chromium/media/filters/fake_demuxer_stream.h b/chromium/media/base/fake_demuxer_stream.h
index fd651dc28ad..ddb8a46fdfb 100644
--- a/chromium/media/filters/fake_demuxer_stream.h
+++ b/chromium/media/base/fake_demuxer_stream.h
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_FAKE_DEMUXER_STREAM_H_
-#define MEDIA_FILTERS_FAKE_DEMUXER_STREAM_H_
+#ifndef MEDIA_BASE_FAKE_DEMUXER_STREAM_H_
+#define MEDIA_BASE_FAKE_DEMUXER_STREAM_H_
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/demuxer_stream_provider.h"
#include "media/base/video_decoder_config.h"
namespace base {
@@ -31,7 +32,7 @@ class FakeDemuxerStream : public DemuxerStream {
void Read(const ReadCB& read_cb) override;
AudioDecoderConfig audio_decoder_config() override;
VideoDecoderConfig video_decoder_config() override;
- Type type() override;
+ Type type() const override;
bool SupportsConfigChanges() override;
VideoRotation video_rotation() override;
@@ -102,6 +103,23 @@ class FakeDemuxerStream : public DemuxerStream {
DISALLOW_COPY_AND_ASSIGN(FakeDemuxerStream);
};
+class FakeDemuxerStreamProvider : public DemuxerStreamProvider {
+ public:
+ // Note: FakeDemuxerStream currently only supports a fake video DemuxerStream.
+ FakeDemuxerStreamProvider(int num_video_configs,
+ int num_video_buffers_in_one_config,
+ bool is_video_encrypted);
+ ~FakeDemuxerStreamProvider() override;
+
+ // DemuxerStreamProvider implementation.
+ DemuxerStream* GetStream(DemuxerStream::Type type) override;
+
+ private:
+ FakeDemuxerStream fake_video_stream_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeDemuxerStreamProvider);
+};
+
} // namespace media
-#endif // MEDIA_FILTERS_FAKE_DEMUXER_STREAM_H_
+#endif // MEDIA_BASE_FAKE_DEMUXER_STREAM_H_
diff --git a/chromium/media/filters/fake_demuxer_stream_unittest.cc b/chromium/media/base/fake_demuxer_stream_unittest.cc
index 0eb92b0c6ba..ea5ec784fe9 100644
--- a/chromium/media/filters/fake_demuxer_stream_unittest.cc
+++ b/chromium/media/base/fake_demuxer_stream_unittest.cc
@@ -8,7 +8,7 @@
#include "base/message_loop/message_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer_stream.h"
-#include "media/filters/fake_demuxer_stream.h"
+#include "media/base/fake_demuxer_stream.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -16,9 +16,10 @@ namespace media {
const int kNumBuffersInOneConfig = 9;
const int kNumBuffersToReadFirst = 5;
const int kNumConfigs = 3;
-COMPILE_ASSERT(kNumBuffersToReadFirst < kNumBuffersInOneConfig,
- do_not_read_too_many_buffers);
-COMPILE_ASSERT(kNumConfigs > 0, need_multiple_configs_to_trigger_config_change);
+static_assert(kNumBuffersToReadFirst < kNumBuffersInOneConfig,
+ "do not read too many buffers");
+static_assert(kNumConfigs > 0,
+ "need multiple configs to trigger config change");
class FakeDemuxerStreamTest : public testing::Test {
public:
diff --git a/chromium/media/base/fake_text_track_stream.cc b/chromium/media/base/fake_text_track_stream.cc
index f18e5403b4e..214eec435bb 100644
--- a/chromium/media/base/fake_text_track_stream.cc
+++ b/chromium/media/base/fake_text_track_stream.cc
@@ -6,13 +6,15 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
#include "media/base/decoder_buffer.h"
#include "media/filters/webvtt_util.h"
namespace media {
FakeTextTrackStream::FakeTextTrackStream()
- : task_runner_(base::MessageLoopProxy::current()),
+ : task_runner_(base::ThreadTaskRunnerHandle::Get()),
stopping_(false) {
}
@@ -32,7 +34,7 @@ void FakeTextTrackStream::Read(const ReadCB& read_cb) {
}
}
-DemuxerStream::Type FakeTextTrackStream::type() {
+DemuxerStream::Type FakeTextTrackStream::type() const {
return DemuxerStream::TEXT;
}
@@ -67,6 +69,9 @@ void FakeTextTrackStream::SatisfyPendingRead(
buffer->set_timestamp(start);
buffer->set_duration(duration);
+ // Assume all fake text buffers are keyframes.
+ buffer->set_is_key_frame(true);
+
base::ResetAndReturn(&read_cb_).Run(kOk, buffer);
}
diff --git a/chromium/media/base/fake_text_track_stream.h b/chromium/media/base/fake_text_track_stream.h
index 2db9934a7b6..bbfbe8bc0c5 100644
--- a/chromium/media/base/fake_text_track_stream.h
+++ b/chromium/media/base/fake_text_track_stream.h
@@ -15,16 +15,16 @@ namespace media {
class FakeTextTrackStream : public DemuxerStream {
public:
FakeTextTrackStream();
- virtual ~FakeTextTrackStream();
+ ~FakeTextTrackStream() override;
// DemuxerStream implementation.
- virtual void Read(const ReadCB&) override;
+ void Read(const ReadCB&) override;
MOCK_METHOD0(audio_decoder_config, AudioDecoderConfig());
MOCK_METHOD0(video_decoder_config, VideoDecoderConfig());
- virtual Type type() override;
+ Type type() const override;
MOCK_METHOD0(EnableBitstreamConverter, void());
- virtual bool SupportsConfigChanges();
- virtual VideoRotation video_rotation() override;
+ bool SupportsConfigChanges() override;
+ VideoRotation video_rotation() override;
void SatisfyPendingRead(const base::TimeDelta& start,
const base::TimeDelta& duration,
diff --git a/chromium/media/base/key_system_info.cc b/chromium/media/base/key_system_info.cc
index f4369124cf1..f36104ab120 100644
--- a/chromium/media/base/key_system_info.cc
+++ b/chromium/media/base/key_system_info.cc
@@ -6,11 +6,7 @@
namespace media {
-KeySystemInfo::KeySystemInfo(const std::string& key_system)
- : key_system(key_system),
- supported_init_data_types(EME_INIT_DATA_TYPE_NONE),
- supported_codecs(EME_CODEC_NONE),
- use_aes_decryptor(false) {
+KeySystemInfo::KeySystemInfo() {
}
KeySystemInfo::~KeySystemInfo() {
diff --git a/chromium/media/base/key_system_info.h b/chromium/media/base/key_system_info.h
index e259c00d855..d349452b885 100644
--- a/chromium/media/base/key_system_info.h
+++ b/chromium/media/base/key_system_info.h
@@ -7,6 +7,7 @@
#include <string>
+#include "build/build_config.h"
#include "media/base/eme_constants.h"
#include "media/base/media_export.h"
@@ -30,16 +31,24 @@ namespace media {
// Contains information about an EME key system as well as how to instantiate
// the corresponding CDM.
struct MEDIA_EXPORT KeySystemInfo {
- explicit KeySystemInfo(const std::string& key_system);
+ KeySystemInfo();
~KeySystemInfo();
std::string key_system;
- // Specifies registered initialization data types supported by |key_system|.
- SupportedInitDataTypes supported_init_data_types;
-
- // Specifies codecs supported by |key_system|.
- SupportedCodecs supported_codecs;
+ InitDataTypeMask supported_init_data_types = kInitDataTypeMaskNone;
+ SupportedCodecs supported_codecs = EME_CODEC_NONE;
+#if defined(OS_ANDROID)
+ SupportedCodecs supported_secure_codecs = EME_CODEC_NONE;
+#endif // defined(OS_ANDROID)
+ EmeRobustness max_audio_robustness = EmeRobustness::INVALID;
+ EmeRobustness max_video_robustness = EmeRobustness::INVALID;
+ EmeSessionTypeSupport persistent_license_support =
+ EmeSessionTypeSupport::INVALID;
+ EmeSessionTypeSupport persistent_release_message_support =
+ EmeSessionTypeSupport::INVALID;
+ EmeFeatureSupport persistent_state_support = EmeFeatureSupport::INVALID;
+ EmeFeatureSupport distinctive_identifier_support = EmeFeatureSupport::INVALID;
// A hierarchical parent for |key_system|. This value can be used to check
// supported types but cannot be used to instantiate a MediaKeys object.
@@ -47,7 +56,7 @@ struct MEDIA_EXPORT KeySystemInfo {
std::string parent_key_system;
// The following indicate how the corresponding CDM should be instantiated.
- bool use_aes_decryptor;
+ bool use_aes_decryptor = false;
#if defined(ENABLE_PEPPER_CDMS)
std::string pepper_type;
#endif
diff --git a/chromium/media/base/key_systems.cc b/chromium/media/base/key_systems.cc
new file mode 100644
index 00000000000..1cd202ca425
--- /dev/null
+++ b/chromium/media/base/key_systems.cc
@@ -0,0 +1,940 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/key_systems.h"
+
+
+#include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "media/base/key_system_info.h"
+#include "media/base/key_systems_support_uma.h"
+#include "media/base/media_client.h"
+#include "media/cdm/key_system_names.h"
+#include "third_party/widevine/cdm/widevine_cdm_common.h"
+
+namespace media {
+
+const char kClearKeyKeySystem[] = "org.w3.clearkey";
+const char kPrefixedClearKeyKeySystem[] = "webkit-org.w3.clearkey";
+const char kUnsupportedClearKeyKeySystem[] = "unsupported-org.w3.clearkey";
+
+// These names are used by UMA. Do not change them!
+const char kClearKeyKeySystemNameForUMA[] = "ClearKey";
+const char kUnknownKeySystemNameForUMA[] = "Unknown";
+
+struct NamedCodec {
+ const char* name;
+ EmeCodec type;
+};
+
+// Mapping between containers and their codecs.
+// Only audio codec can belong to a "audio/*" container. Both audio and video
+// codecs can belong to a "video/*" container.
+// TODO(sandersd): This definition only makes sense for prefixed EME. Change it
+// when prefixed EME is removed. http://crbug.com/249976
+static NamedCodec kContainerToCodecMasks[] = {
+ {"audio/webm", EME_CODEC_WEBM_AUDIO_ALL},
+ {"video/webm", EME_CODEC_WEBM_ALL},
+#if defined(USE_PROPRIETARY_CODECS)
+ {"audio/mp4", EME_CODEC_MP4_AUDIO_ALL},
+ {"video/mp4", EME_CODEC_MP4_ALL}
+#endif // defined(USE_PROPRIETARY_CODECS)
+};
+
+// Mapping between codec names and enum values.
+static NamedCodec kCodecStrings[] = {
+ {"opus", EME_CODEC_WEBM_OPUS},
+ {"vorbis", EME_CODEC_WEBM_VORBIS},
+ {"vp8", EME_CODEC_WEBM_VP8},
+ {"vp8.0", EME_CODEC_WEBM_VP8},
+ {"vp9", EME_CODEC_WEBM_VP9},
+ {"vp9.0", EME_CODEC_WEBM_VP9},
+#if defined(USE_PROPRIETARY_CODECS)
+ {"mp4a", EME_CODEC_MP4_AAC},
+ {"avc1", EME_CODEC_MP4_AVC1},
+ {"avc3", EME_CODEC_MP4_AVC1}
+#endif // defined(USE_PROPRIETARY_CODECS)
+};
+
+static EmeRobustness ConvertRobustness(const std::string& robustness) {
+ if (robustness.empty())
+ return EmeRobustness::EMPTY;
+ if (robustness == "SW_SECURE_CRYPTO")
+ return EmeRobustness::SW_SECURE_CRYPTO;
+ if (robustness == "SW_SECURE_DECODE")
+ return EmeRobustness::SW_SECURE_DECODE;
+ if (robustness == "HW_SECURE_CRYPTO")
+ return EmeRobustness::HW_SECURE_CRYPTO;
+ if (robustness == "HW_SECURE_DECODE")
+ return EmeRobustness::HW_SECURE_DECODE;
+ if (robustness == "HW_SECURE_ALL")
+ return EmeRobustness::HW_SECURE_ALL;
+ return EmeRobustness::INVALID;
+}
+
+static void AddClearKey(std::vector<KeySystemInfo>* concrete_key_systems) {
+ KeySystemInfo info;
+ info.key_system = kClearKeyKeySystem;
+
+ // On Android, Vorbis, VP8, AAC and AVC1 are supported in MediaCodec:
+ // http://developer.android.com/guide/appendix/media-formats.html
+ // VP9 support is device dependent.
+
+ info.supported_init_data_types =
+ kInitDataTypeMaskWebM | kInitDataTypeMaskKeyIds;
+ info.supported_codecs = EME_CODEC_WEBM_ALL;
+
+#if defined(OS_ANDROID)
+ // Temporarily disable VP9 support for Android.
+ // TODO(xhwang): Use mime_util.h to query VP9 support on Android.
+ info.supported_codecs &= ~EME_CODEC_WEBM_VP9;
+
+ // Opus is not supported on Android yet. http://crbug.com/318436.
+ // TODO(sandersd): Check for platform support to set this bit.
+ info.supported_codecs &= ~EME_CODEC_WEBM_OPUS;
+#endif // defined(OS_ANDROID)
+
+#if defined(USE_PROPRIETARY_CODECS)
+ info.supported_init_data_types |= kInitDataTypeMaskCenc;
+ info.supported_codecs |= EME_CODEC_MP4_ALL;
+#endif // defined(USE_PROPRIETARY_CODECS)
+
+ info.max_audio_robustness = EmeRobustness::EMPTY;
+ info.max_video_robustness = EmeRobustness::EMPTY;
+ info.persistent_license_support = EmeSessionTypeSupport::NOT_SUPPORTED;
+ info.persistent_release_message_support =
+ EmeSessionTypeSupport::NOT_SUPPORTED;
+ info.persistent_state_support = EmeFeatureSupport::NOT_SUPPORTED;
+ info.distinctive_identifier_support = EmeFeatureSupport::NOT_SUPPORTED;
+
+ info.use_aes_decryptor = true;
+
+ concrete_key_systems->push_back(info);
+}
+
+// Returns whether the |key_system| is known to Chromium and is thus likely to
+// be implemented in an interoperable way.
+// True is always returned for a |key_system| that begins with "x-".
+//
+// As with other web platform features, advertising support for a key system
+// implies that it adheres to a defined and interoperable specification.
+//
+// To ensure interoperability, implementations of a specific |key_system| string
+// must conform to a specification for that identifier that defines
+// key system-specific behaviors not fully defined by the EME specification.
+// That specification should be provided by the owner of the domain that is the
+// reverse of the |key_system| string.
+// This involves more than calling a library, SDK, or platform API.
+// KeySystemsImpl must be populated appropriately, and there will likely be glue
+// code to adapt to the API of the library, SDK, or platform API.
+//
+// Chromium mainline contains this data and glue code for specific key systems,
+// which should help ensure interoperability with other implementations using
+// these key systems.
+//
+// If you need to add support for other key systems, ensure that you have
+// obtained the specification for how to integrate it with EME, implemented the
+// appropriate glue/adapter code, and added all the appropriate data to
+// KeySystemsImpl. Only then should you change this function.
+static bool IsPotentiallySupportedKeySystem(const std::string& key_system) {
+ // Known and supported key systems.
+ if (key_system == kWidevineKeySystem)
+ return true;
+ if (key_system == kClearKey)
+ return true;
+
+ // External Clear Key is known and supports suffixes for testing.
+ if (IsExternalClearKey(key_system))
+ return true;
+
+ // Chromecast defines behaviors for Cast clients within its reverse domain.
+ const char kChromecastRoot[] = "com.chromecast";
+ if (IsParentKeySystemOf(kChromecastRoot, key_system))
+ return true;
+
+ // Implementations that do not have a specification or appropriate glue code
+ // can use the "x-" prefix to avoid conflicting with and advertising support
+ // for real key system names. Use is discouraged.
+ const char kExcludedPrefix[] = "x-";
+ if (key_system.find(kExcludedPrefix, 0, arraysize(kExcludedPrefix) - 1) == 0)
+ return true;
+
+ return false;
+}
+
+class KeySystemsImpl : public KeySystems {
+ public:
+ static KeySystemsImpl* GetInstance();
+
+ void UpdateIfNeeded();
+
+ bool IsConcreteSupportedKeySystem(const std::string& key_system) const;
+
+ bool PrefixedIsSupportedKeySystemWithMediaMimeType(
+ const std::string& mime_type,
+ const std::vector<std::string>& codecs,
+ const std::string& key_system);
+
+ std::string GetKeySystemNameForUMA(const std::string& key_system) const;
+
+ bool UseAesDecryptor(const std::string& concrete_key_system) const;
+
+#if defined(ENABLE_PEPPER_CDMS)
+ std::string GetPepperType(const std::string& concrete_key_system) const;
+#endif
+
+ void AddContainerMask(const std::string& container, uint32 mask);
+ void AddCodecMask(
+ EmeMediaType media_type,
+ const std::string& codec,
+ uint32 mask);
+
+ // Implementation of KeySystems interface.
+ bool IsSupportedKeySystem(const std::string& key_system) const override;
+
+ bool IsSupportedInitDataType(const std::string& key_system,
+ EmeInitDataType init_data_type) const override;
+
+ EmeConfigRule GetContentTypeConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& container_mime_type,
+ const std::vector<std::string>& codecs) const override;
+
+ EmeConfigRule GetRobustnessConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& requested_robustness) const override;
+
+ EmeSessionTypeSupport GetPersistentLicenseSessionSupport(
+ const std::string& key_system) const override;
+
+ EmeSessionTypeSupport GetPersistentReleaseMessageSessionSupport(
+ const std::string& key_system) const override;
+
+ EmeFeatureSupport GetPersistentStateSupport(
+ const std::string& key_system) const override;
+
+ EmeFeatureSupport GetDistinctiveIdentifierSupport(
+ const std::string& key_system) const override;
+
+ private:
+ KeySystemsImpl();
+ ~KeySystemsImpl() override;
+
+ void InitializeUMAInfo();
+
+ void UpdateSupportedKeySystems();
+
+ void AddConcreteSupportedKeySystems(
+ const std::vector<KeySystemInfo>& concrete_key_systems);
+
+ friend struct base::DefaultLazyInstanceTraits<KeySystemsImpl>;
+
+ typedef base::hash_map<std::string, KeySystemInfo> KeySystemInfoMap;
+ typedef base::hash_map<std::string, std::string> ParentKeySystemMap;
+ typedef base::hash_map<std::string, SupportedCodecs> ContainerCodecsMap;
+ typedef base::hash_map<std::string, EmeCodec> CodecsMap;
+ typedef base::hash_map<std::string, EmeInitDataType> InitDataTypesMap;
+ typedef base::hash_map<std::string, std::string> KeySystemNameForUMAMap;
+
+ // TODO(sandersd): Separate container enum from codec mask value.
+ // http://crbug.com/417440
+ SupportedCodecs GetCodecMaskForContainer(
+ const std::string& container) const;
+ EmeCodec GetCodecForString(const std::string& codec) const;
+
+ const std::string& PrefixedGetConcreteKeySystemNameFor(
+ const std::string& key_system) const;
+
+ // Returns whether a |container| type is supported by checking
+ // |key_system_supported_codecs|.
+ // TODO(xhwang): Update this to actually check initDataType support.
+ bool IsSupportedContainer(const std::string& container,
+ SupportedCodecs key_system_supported_codecs) const;
+
+ // Returns true if all |codecs| are supported in |container| by checking
+ // |key_system_supported_codecs|.
+ bool IsSupportedContainerAndCodecs(
+ const std::string& container,
+ const std::vector<std::string>& codecs,
+ SupportedCodecs key_system_supported_codecs) const;
+
+ // Map from key system string to capabilities.
+ KeySystemInfoMap concrete_key_system_map_;
+
+ // Map from parent key system to the concrete key system that should be used
+ // to represent its capabilities.
+ ParentKeySystemMap parent_key_system_map_;
+
+ KeySystemsSupportUMA key_systems_support_uma_;
+
+ ContainerCodecsMap container_to_codec_mask_map_;
+ CodecsMap codec_string_map_;
+ KeySystemNameForUMAMap key_system_name_for_uma_map_;
+
+ SupportedCodecs audio_codec_mask_;
+ SupportedCodecs video_codec_mask_;
+
+ // Makes sure all methods are called from the same thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeySystemsImpl);
+};
+
+static base::LazyInstance<KeySystemsImpl>::Leaky g_key_systems =
+ LAZY_INSTANCE_INITIALIZER;
+
+KeySystemsImpl* KeySystemsImpl::GetInstance() {
+ KeySystemsImpl* key_systems = g_key_systems.Pointer();
+ key_systems->UpdateIfNeeded();
+ return key_systems;
+}
+
// Because we use a LazyInstance, the key systems info must be populated when
// the instance is lazily initiated.
KeySystemsImpl::KeySystemsImpl() :
    audio_codec_mask_(EME_CODEC_AUDIO_ALL),
    video_codec_mask_(EME_CODEC_VIDEO_ALL) {
  // Index the static container-name -> codec-mask table for O(1) lookup.
  for (size_t i = 0; i < arraysize(kContainerToCodecMasks); ++i) {
    const std::string& name = kContainerToCodecMasks[i].name;
    DCHECK(!container_to_codec_mask_map_.count(name));
    container_to_codec_mask_map_[name] = kContainerToCodecMasks[i].type;
  }
  // Index the static codec-string -> EmeCodec table.
  for (size_t i = 0; i < arraysize(kCodecStrings); ++i) {
    const std::string& name = kCodecStrings[i].name;
    DCHECK(!codec_string_map_.count(name));
    codec_string_map_[name] = kCodecStrings[i].type;
  }

  InitializeUMAInfo();

  // Always update supported key systems during construction.
  UpdateSupportedKeySystems();
}
+
+KeySystemsImpl::~KeySystemsImpl() {
+}
+
+SupportedCodecs KeySystemsImpl::GetCodecMaskForContainer(
+ const std::string& container) const {
+ ContainerCodecsMap::const_iterator iter =
+ container_to_codec_mask_map_.find(container);
+ if (iter != container_to_codec_mask_map_.end())
+ return iter->second;
+ return EME_CODEC_NONE;
+}
+
+EmeCodec KeySystemsImpl::GetCodecForString(const std::string& codec) const {
+ CodecsMap::const_iterator iter = codec_string_map_.find(codec);
+ if (iter != codec_string_map_.end())
+ return iter->second;
+ return EME_CODEC_NONE;
+}
+
+const std::string& KeySystemsImpl::PrefixedGetConcreteKeySystemNameFor(
+ const std::string& key_system) const {
+ ParentKeySystemMap::const_iterator iter =
+ parent_key_system_map_.find(key_system);
+ if (iter != parent_key_system_map_.end())
+ return iter->second;
+ return key_system;
+}
+
// Populates the key-system -> UMA-name map and registers key systems that
// report support statistics. Called once, from the constructor.
void KeySystemsImpl::InitializeUMAInfo() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(key_system_name_for_uma_map_.empty());

  std::vector<KeySystemInfoForUMA> key_systems_info_for_uma;
  if (GetMediaClient())
    GetMediaClient()->AddKeySystemsInfoForUMA(&key_systems_info_for_uma);

  for (const KeySystemInfoForUMA& info : key_systems_info_for_uma) {
    key_system_name_for_uma_map_[info.key_system] =
        info.key_system_name_for_uma;
    if (info.reports_key_system_support_to_uma)
      key_systems_support_uma_.AddKeySystemToReport(info.key_system);
  }

  // Clear Key is always supported.
  key_system_name_for_uma_map_[kClearKeyKeySystem] =
      kClearKeyKeySystemNameForUMA;
}
+
+void KeySystemsImpl::UpdateIfNeeded() {
+ if (GetMediaClient() && GetMediaClient()->IsKeySystemsUpdateNeeded())
+ UpdateSupportedKeySystems();
+}
+
// Rebuilds |concrete_key_system_map_| and |parent_key_system_map_| from
// scratch, using the MediaClient-provided key systems plus built-in Clear Key.
void KeySystemsImpl::UpdateSupportedKeySystems() {
  DCHECK(thread_checker_.CalledOnValidThread());
  concrete_key_system_map_.clear();
  parent_key_system_map_.clear();

  // Build KeySystemInfo.
  std::vector<KeySystemInfo> key_systems_info;

  // Add key systems supported by the MediaClient implementation.
  if (GetMediaClient())
    GetMediaClient()->AddSupportedKeySystems(&key_systems_info);

  // Clear Key is always supported.
  AddClearKey(&key_systems_info);

  AddConcreteSupportedKeySystems(key_systems_info);
}
+
// Validates each KeySystemInfo's invariants (via DCHECKs) and registers it in
// |concrete_key_system_map_|, plus |parent_key_system_map_| when the entry
// names an abstract parent key system. Both maps must be empty on entry.
void KeySystemsImpl::AddConcreteSupportedKeySystems(
    const std::vector<KeySystemInfo>& concrete_key_systems) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(concrete_key_system_map_.empty());
  DCHECK(parent_key_system_map_.empty());

  for (const KeySystemInfo& info : concrete_key_systems) {
    DCHECK(!info.key_system.empty());
    DCHECK(info.max_audio_robustness != EmeRobustness::INVALID);
    DCHECK(info.max_video_robustness != EmeRobustness::INVALID);
    DCHECK(info.persistent_license_support != EmeSessionTypeSupport::INVALID);
    DCHECK(info.persistent_release_message_support !=
           EmeSessionTypeSupport::INVALID);
    DCHECK(info.persistent_state_support != EmeFeatureSupport::INVALID);
    DCHECK(info.distinctive_identifier_support != EmeFeatureSupport::INVALID);

    // Supporting persistent state is a prerequisite for supporting persistent
    // sessions.
    if (info.persistent_state_support == EmeFeatureSupport::NOT_SUPPORTED) {
      DCHECK(info.persistent_license_support ==
             EmeSessionTypeSupport::NOT_SUPPORTED);
      DCHECK(info.persistent_release_message_support ==
             EmeSessionTypeSupport::NOT_SUPPORTED);
    }

    // persistent-release-message sessions are not currently supported.
    // http://crbug.com/448888
    DCHECK(info.persistent_release_message_support ==
           EmeSessionTypeSupport::NOT_SUPPORTED);

    // If distinctive identifiers are not supported, then no other features can
    // require them.
    if (info.distinctive_identifier_support ==
        EmeFeatureSupport::NOT_SUPPORTED) {
      DCHECK(info.persistent_license_support !=
             EmeSessionTypeSupport::SUPPORTED_WITH_IDENTIFIER);
      DCHECK(info.persistent_release_message_support !=
             EmeSessionTypeSupport::SUPPORTED_WITH_IDENTIFIER);
    }

    // Distinctive identifiers and persistent state can only be reliably blocked
    // (and therefore be safely configurable) for Pepper-hosted key systems. For
    // other platforms, (except for the AES decryptor) assume that the CDM can
    // and will do anything.
    bool can_block = info.use_aes_decryptor;
#if defined(ENABLE_PEPPER_CDMS)
    DCHECK_EQ(info.use_aes_decryptor, info.pepper_type.empty());
    if (!info.pepper_type.empty())
      can_block = true;
#endif
    if (!can_block) {
      DCHECK(info.distinctive_identifier_support ==
             EmeFeatureSupport::ALWAYS_ENABLED);
      DCHECK(info.persistent_state_support ==
             EmeFeatureSupport::ALWAYS_ENABLED);
    }

    DCHECK(!IsSupportedKeySystem(info.key_system))
        << "Key system '" << info.key_system << "' already registered";
    DCHECK(!parent_key_system_map_.count(info.key_system))
        << "'" << info.key_system << "' is already registered as a parent";
    concrete_key_system_map_[info.key_system] = info;
    if (!info.parent_key_system.empty()) {
      DCHECK(!IsConcreteSupportedKeySystem(info.parent_key_system))
          << "Parent '" << info.parent_key_system << "' "
          << "already registered concrete";
      DCHECK(!parent_key_system_map_.count(info.parent_key_system))
          << "Parent '" << info.parent_key_system << "' already registered";
      parent_key_system_map_[info.parent_key_system] = info.key_system;
    }
  }
}
+
+bool KeySystemsImpl::IsConcreteSupportedKeySystem(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return concrete_key_system_map_.count(key_system) != 0;
+}
+
// Returns whether |container| is supported: true iff at least one codec that
// can appear in |container| is present in |key_system_supported_codecs|.
bool KeySystemsImpl::IsSupportedContainer(
    const std::string& container,
    SupportedCodecs key_system_supported_codecs) const {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!container.empty());

  // When checking container support for EME, "audio/foo" should be treated the
  // same as "video/foo". Convert the |container| to achieve this.
  // TODO(xhwang): Replace this with real checks against supported initDataTypes
  // combined with supported demuxers.
  std::string canonical_container = container;
  if (container.find("audio/") == 0)
    canonical_container.replace(0, 6, "video/");

  // A container is supported iff at least one codec in that container is
  // supported.
  SupportedCodecs supported_codecs =
      GetCodecMaskForContainer(canonical_container);
  return (supported_codecs & key_system_supported_codecs) != 0;
}
+
+bool KeySystemsImpl::IsSupportedContainerAndCodecs(
+ const std::string& container,
+ const std::vector<std::string>& codecs,
+ SupportedCodecs key_system_supported_codecs) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!container.empty());
+ DCHECK(!codecs.empty());
+ DCHECK(IsSupportedContainer(container, key_system_supported_codecs));
+
+ SupportedCodecs container_supported_codecs =
+ GetCodecMaskForContainer(container);
+
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ if (codecs[i].empty())
+ continue;
+
+ EmeCodec codec = GetCodecForString(codecs[i]);
+
+ // Unsupported codec.
+ if (!(codec & key_system_supported_codecs))
+ return false;
+
+ // Unsupported codec/container combination, e.g. "video/webm" and "avc1".
+ if (!(codec & container_supported_codecs))
+ return false;
+ }
+
+ return true;
+}
+
// Returns whether |key_system| (which must be concrete) supports
// |init_data_type|, by testing the key system's init-data-type bitmask.
bool KeySystemsImpl::IsSupportedInitDataType(
    const std::string& key_system,
    EmeInitDataType init_data_type) const {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Locate |key_system|. Only concrete key systems are supported in unprefixed.
  KeySystemInfoMap::const_iterator key_system_iter =
      concrete_key_system_map_.find(key_system);
  if (key_system_iter == concrete_key_system_map_.end()) {
    NOTREACHED();
    return false;
  }

  // Check |init_data_type|.
  InitDataTypeMask available_init_data_types =
      key_system_iter->second.supported_init_data_types;
  switch (init_data_type) {
    case EmeInitDataType::UNKNOWN:
      return false;
    case EmeInitDataType::WEBM:
      return (available_init_data_types & kInitDataTypeMaskWebM) != 0;
    case EmeInitDataType::CENC:
      return (available_init_data_types & kInitDataTypeMaskCenc) != 0;
    case EmeInitDataType::KEYIDS:
      return (available_init_data_types & kInitDataTypeMaskKeyIds) != 0;
  }
  // All enum values are handled above; reaching here indicates a bad cast.
  NOTREACHED();
  return false;
}
+
// Prefixed-EME (webkit-) support query. Reports query/support UMA statistics
// as a side effect, which is why this method is not const.
bool KeySystemsImpl::PrefixedIsSupportedKeySystemWithMediaMimeType(
    const std::string& mime_type,
    const std::vector<std::string>& codecs,
    const std::string& key_system) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // The prefixed API accepts parent (abstract) names; resolve to the concrete
  // key system before the map lookup.
  const std::string& concrete_key_system =
      PrefixedGetConcreteKeySystemNameFor(key_system);

  bool has_type = !mime_type.empty();

  key_systems_support_uma_.ReportKeySystemQuery(key_system, has_type);

  // Check key system support.
  KeySystemInfoMap::const_iterator key_system_iter =
      concrete_key_system_map_.find(concrete_key_system);
  if (key_system_iter == concrete_key_system_map_.end())
    return false;

  // The key system matched; record support ignoring the MIME type. Support
  // including the type is recorded further below, only if all checks pass.
  key_systems_support_uma_.ReportKeySystemSupport(key_system, false);

  if (!has_type) {
    DCHECK(codecs.empty());
    return true;
  }

  SupportedCodecs key_system_supported_codecs =
      key_system_iter->second.supported_codecs;

  if (!IsSupportedContainer(mime_type, key_system_supported_codecs))
    return false;

  if (!codecs.empty() &&
      !IsSupportedContainerAndCodecs(
          mime_type, codecs, key_system_supported_codecs)) {
    return false;
  }

  key_systems_support_uma_.ReportKeySystemSupport(key_system, true);

  return true;
}
+
+std::string KeySystemsImpl::GetKeySystemNameForUMA(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemNameForUMAMap::const_iterator iter =
+ key_system_name_for_uma_map_.find(key_system);
+ if (iter == key_system_name_for_uma_map_.end())
+ return kUnknownKeySystemNameForUMA;
+
+ return iter->second;
+}
+
+bool KeySystemsImpl::UseAesDecryptor(
+ const std::string& concrete_key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemInfoMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(concrete_key_system);
+ if (key_system_iter == concrete_key_system_map_.end()) {
+ DLOG(FATAL) << concrete_key_system << " is not a known concrete system";
+ return false;
+ }
+
+ return key_system_iter->second.use_aes_decryptor;
+}
+
#if defined(ENABLE_PEPPER_CDMS)
// Returns the Pepper MIME type registered for |concrete_key_system|. Unknown
// or non-Pepper key systems trigger DLOG(FATAL)/DLOG_IF(FATAL) in debug
// builds; release builds return the empty string for unknown key systems.
std::string KeySystemsImpl::GetPepperType(
    const std::string& concrete_key_system) const {
  DCHECK(thread_checker_.CalledOnValidThread());

  KeySystemInfoMap::const_iterator key_system_iter =
      concrete_key_system_map_.find(concrete_key_system);
  if (key_system_iter == concrete_key_system_map_.end()) {
    DLOG(FATAL) << concrete_key_system << " is not a known concrete system";
    return std::string();
  }

  const std::string& type = key_system_iter->second.pepper_type;
  DLOG_IF(FATAL, type.empty()) << concrete_key_system << " is not Pepper-based";
  return type;
}
#endif
+
+void KeySystemsImpl::AddContainerMask(
+ const std::string& container,
+ uint32 mask) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!container_to_codec_mask_map_.count(container));
+ container_to_codec_mask_map_[container] = static_cast<EmeCodec>(mask);
+}
+
+void KeySystemsImpl::AddCodecMask(
+ EmeMediaType media_type,
+ const std::string& codec,
+ uint32 mask) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!codec_string_map_.count(codec));
+ codec_string_map_[codec] = static_cast<EmeCodec>(mask);
+ if (media_type == EmeMediaType::AUDIO) {
+ audio_codec_mask_ |= mask;
+ } else {
+ video_codec_mask_ |= mask;
+ }
+}
+
+bool KeySystemsImpl::IsSupportedKeySystem(const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return concrete_key_system_map_.count(key_system) != 0;
+}
+
// Computes the EME configuration rule for |container_mime_type| + |codecs| on
// |key_system|. Rejects containers that do not match |media_type|, containers
// with no codec overlap, and individual unsupported codecs. On Android, also
// downgrades the result when any codec lacks a hardware-secure mode.
EmeConfigRule KeySystemsImpl::GetContentTypeConfigRule(
    const std::string& key_system,
    EmeMediaType media_type,
    const std::string& container_mime_type,
    const std::vector<std::string>& codecs) const {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Make sure the container matches |media_type|.
  SupportedCodecs media_type_codec_mask = EME_CODEC_NONE;
  switch (media_type) {
    case EmeMediaType::AUDIO:
      if (!StartsWithASCII(container_mime_type, "audio/", true))
        return EmeConfigRule::NOT_SUPPORTED;
      media_type_codec_mask = audio_codec_mask_;
      break;
    case EmeMediaType::VIDEO:
      if (!StartsWithASCII(container_mime_type, "video/", true))
        return EmeConfigRule::NOT_SUPPORTED;
      media_type_codec_mask = video_codec_mask_;
      break;
  }

  // Look up the key system's supported codecs.
  KeySystemInfoMap::const_iterator key_system_iter =
      concrete_key_system_map_.find(key_system);
  if (key_system_iter == concrete_key_system_map_.end()) {
    NOTREACHED();
    return EmeConfigRule::NOT_SUPPORTED;
  }
  SupportedCodecs key_system_codec_mask =
      key_system_iter->second.supported_codecs;
#if defined(OS_ANDROID)
  SupportedCodecs key_system_secure_codec_mask =
      key_system_iter->second.supported_secure_codecs;
#endif // defined(OS_ANDROID)


  // Check that the container is supported by the key system. (This check is
  // necessary because |codecs| may be empty.)
  SupportedCodecs container_codec_mask =
      GetCodecMaskForContainer(container_mime_type) & media_type_codec_mask;
  if ((key_system_codec_mask & container_codec_mask) == 0)
    return EmeConfigRule::NOT_SUPPORTED;

  // Check that the codecs are supported by the key system and container.
  EmeConfigRule support = EmeConfigRule::SUPPORTED;
  for (size_t i = 0; i < codecs.size(); i++) {
    SupportedCodecs codec = GetCodecForString(codecs[i]);
    if ((codec & key_system_codec_mask & container_codec_mask) == 0)
      return EmeConfigRule::NOT_SUPPORTED;
#if defined(OS_ANDROID)
    // Check whether the codec supports a hardware-secure mode. The goal is to
    // prevent mixing of non-hardware-secure codecs with hardware-secure codecs,
    // since the mode is fixed at CDM creation.
    //
    // Because the check for regular codec support is early-exit, we don't have
    // to consider codecs that are only supported in hardware-secure mode. We
    // could do so, and make use of HW_SECURE_CODECS_REQUIRED, if it turns out
    // that hardware-secure-only codecs actually exist and are useful.
    if ((codec & key_system_secure_codec_mask) == 0)
      support = EmeConfigRule::HW_SECURE_CODECS_NOT_ALLOWED;
#endif // defined(OS_ANDROID)
  }

  return support;
}
+
// Computes the EME configuration rule for |requested_robustness| on
// |key_system|/|media_type|: invalid strings are rejected, the empty string is
// always allowed, and other values are compared against the key system's
// per-media-type maximum, with Widevine-specific platform adjustments.
EmeConfigRule KeySystemsImpl::GetRobustnessConfigRule(
    const std::string& key_system,
    EmeMediaType media_type,
    const std::string& requested_robustness) const {
  DCHECK(thread_checker_.CalledOnValidThread());

  EmeRobustness robustness = ConvertRobustness(requested_robustness);
  if (robustness == EmeRobustness::INVALID)
    return EmeConfigRule::NOT_SUPPORTED;
  if (robustness == EmeRobustness::EMPTY)
    return EmeConfigRule::SUPPORTED;

  KeySystemInfoMap::const_iterator key_system_iter =
      concrete_key_system_map_.find(key_system);
  if (key_system_iter == concrete_key_system_map_.end()) {
    NOTREACHED();
    return EmeConfigRule::NOT_SUPPORTED;
  }

  EmeRobustness max_robustness = EmeRobustness::INVALID;
  switch (media_type) {
    case EmeMediaType::AUDIO:
      max_robustness = key_system_iter->second.max_audio_robustness;
      break;
    case EmeMediaType::VIDEO:
      max_robustness = key_system_iter->second.max_video_robustness;
      break;
  }

  // We can compare robustness levels whenever they are not HW_SECURE_CRYPTO
  // and SW_SECURE_DECODE in some order. If they are exactly those two then the
  // robustness requirement is not supported.
  if ((max_robustness == EmeRobustness::HW_SECURE_CRYPTO &&
       robustness == EmeRobustness::SW_SECURE_DECODE) ||
      (max_robustness == EmeRobustness::SW_SECURE_DECODE &&
       robustness == EmeRobustness::HW_SECURE_CRYPTO) ||
      robustness > max_robustness) {
    return EmeConfigRule::NOT_SUPPORTED;
  }

  if (key_system == kWidevineKeySystem) {
#if defined(OS_CHROMEOS)
    // Hardware security requires remote attestation.
    if (robustness >= EmeRobustness::HW_SECURE_CRYPTO)
      return EmeConfigRule::IDENTIFIER_REQUIRED;

    // For video, recommend remote attestation if HW_SECURE_ALL is available,
    // because it enables hardware accelerated decoding.
    // TODO(sandersd): Only do this when hardware accelerated decoding is
    // available for the requested codecs.
    if (media_type == EmeMediaType::VIDEO &&
        max_robustness == EmeRobustness::HW_SECURE_ALL) {
      return EmeConfigRule::IDENTIFIER_RECOMMENDED;
    }
#elif defined(OS_ANDROID)
    if (robustness > EmeRobustness::SW_SECURE_CRYPTO)
      return EmeConfigRule::HW_SECURE_CODECS_REQUIRED;
#endif // defined(OS_CHROMEOS)
  }

  return EmeConfigRule::SUPPORTED;
}
+
+EmeSessionTypeSupport KeySystemsImpl::GetPersistentLicenseSessionSupport(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemInfoMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(key_system);
+ if (key_system_iter == concrete_key_system_map_.end()) {
+ NOTREACHED();
+ return EmeSessionTypeSupport::INVALID;
+ }
+ return key_system_iter->second.persistent_license_support;
+}
+
+EmeSessionTypeSupport KeySystemsImpl::GetPersistentReleaseMessageSessionSupport(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemInfoMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(key_system);
+ if (key_system_iter == concrete_key_system_map_.end()) {
+ NOTREACHED();
+ return EmeSessionTypeSupport::INVALID;
+ }
+ return key_system_iter->second.persistent_release_message_support;
+}
+
+EmeFeatureSupport KeySystemsImpl::GetPersistentStateSupport(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemInfoMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(key_system);
+ if (key_system_iter == concrete_key_system_map_.end()) {
+ NOTREACHED();
+ return EmeFeatureSupport::INVALID;
+ }
+ return key_system_iter->second.persistent_state_support;
+}
+
+EmeFeatureSupport KeySystemsImpl::GetDistinctiveIdentifierSupport(
+ const std::string& key_system) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ KeySystemInfoMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(key_system);
+ if (key_system_iter == concrete_key_system_map_.end()) {
+ NOTREACHED();
+ return EmeFeatureSupport::INVALID;
+ }
+ return key_system_iter->second.distinctive_identifier_support;
+}
+
// The public KeySystems interface is backed by the KeySystemsImpl singleton.
KeySystems* KeySystems::GetInstance() {
  return KeySystemsImpl::GetInstance();
}
+
+//------------------------------------------------------------------------------
+
+std::string GetUnprefixedKeySystemName(const std::string& key_system) {
+ if (key_system == kClearKeyKeySystem)
+ return kUnsupportedClearKeyKeySystem;
+
+ if (key_system == kPrefixedClearKeyKeySystem)
+ return kClearKeyKeySystem;
+
+ return key_system;
+}
+
+std::string GetPrefixedKeySystemName(const std::string& key_system) {
+ DCHECK_NE(key_system, kPrefixedClearKeyKeySystem);
+
+ if (key_system == kClearKeyKeySystem)
+ return kPrefixedClearKeyKeySystem;
+
+ return key_system;
+}
+
+bool PrefixedIsSupportedConcreteKeySystem(const std::string& key_system) {
+ return KeySystemsImpl::GetInstance()->IsConcreteSupportedKeySystem(
+ key_system);
+}
+
+bool IsSupportedKeySystem(const std::string& key_system) {
+ if (!KeySystemsImpl::GetInstance()->IsSupportedKeySystem(key_system))
+ return false;
+
+ // TODO(ddorwin): Move this to where we add key systems when prefixed EME is
+ // removed (crbug.com/249976).
+ if (!IsPotentiallySupportedKeySystem(key_system)) {
+ // If you encounter this path, see the comments for the above function.
+ NOTREACHED() << "Unrecognized key system " << key_system
+ << ". See code comments.";
+ return false;
+ }
+
+ return true;
+}
+
+bool IsSupportedKeySystemWithInitDataType(const std::string& key_system,
+ EmeInitDataType init_data_type) {
+ return KeySystemsImpl::GetInstance()->IsSupportedInitDataType(key_system,
+ init_data_type);
+}
+
+bool PrefixedIsSupportedKeySystemWithMediaMimeType(
+ const std::string& mime_type,
+ const std::vector<std::string>& codecs,
+ const std::string& key_system) {
+ return KeySystemsImpl::GetInstance()
+ ->PrefixedIsSupportedKeySystemWithMediaMimeType(mime_type, codecs,
+ key_system);
+}
+
+std::string GetKeySystemNameForUMA(const std::string& key_system) {
+ return KeySystemsImpl::GetInstance()->GetKeySystemNameForUMA(key_system);
+}
+
+bool CanUseAesDecryptor(const std::string& concrete_key_system) {
+ return KeySystemsImpl::GetInstance()->UseAesDecryptor(concrete_key_system);
+}
+
#if defined(ENABLE_PEPPER_CDMS)
// Thin wrapper over the singleton's Pepper MIME type lookup.
std::string GetPepperType(const std::string& concrete_key_system) {
  KeySystemsImpl* impl = KeySystemsImpl::GetInstance();
  return impl->GetPepperType(concrete_key_system);
}
#endif
+
+// These two functions are for testing purpose only. The declaration in the
+// header file is guarded by "#if defined(UNIT_TEST)" so that they can be used
+// by tests but not non-test code. However, this .cc file is compiled as part of
+// "media" where "UNIT_TEST" is not defined. So we need to specify
+// "MEDIA_EXPORT" here again so that they are visible to tests.
+
+MEDIA_EXPORT void AddContainerMask(const std::string& container, uint32 mask) {
+ KeySystemsImpl::GetInstance()->AddContainerMask(container, mask);
+}
+
+MEDIA_EXPORT void AddCodecMask(
+ EmeMediaType media_type,
+ const std::string& codec,
+ uint32 mask) {
+ KeySystemsImpl::GetInstance()->AddCodecMask(media_type, codec, mask);
+}
+
+} // namespace media
diff --git a/chromium/media/base/key_systems.h b/chromium/media/base/key_systems.h
new file mode 100644
index 00000000000..ade5d65ca6b
--- /dev/null
+++ b/chromium/media/base/key_systems.h
@@ -0,0 +1,134 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_KEY_SYSTEMS_H_
+#define MEDIA_BASE_KEY_SYSTEMS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/eme_constants.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Provides an interface for querying registered key systems. The exposed API is
+// only intended to support unprefixed EME.
+//
+// Many of the original static methods are still available, they should be
+// migrated into this interface over time (or removed).
+//
+// TODO(sandersd): Provide GetKeySystem() so that it is not necessary to pass
+// |key_system| to every method. http://crbug.com/457438
+class MEDIA_EXPORT KeySystems {
+ public:
+ static KeySystems* GetInstance();
+
+ // Returns whether |key_system| is a supported key system.
+ virtual bool IsSupportedKeySystem(const std::string& key_system) const = 0;
+
+ // Returns whether |init_data_type| is supported by |key_system|.
+ virtual bool IsSupportedInitDataType(
+ const std::string& key_system,
+ EmeInitDataType init_data_type) const = 0;
+
+ // Returns the configuration rule for supporting a container and list of
+ // codecs.
+ virtual EmeConfigRule GetContentTypeConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& container_mime_type,
+ const std::vector<std::string>& codecs) const = 0;
+
+ // Returns the configuration rule for supporting a robustness requirement.
+ virtual EmeConfigRule GetRobustnessConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& requested_robustness) const = 0;
+
+ // Returns the support |key_system| provides for persistent-license sessions.
+ virtual EmeSessionTypeSupport GetPersistentLicenseSessionSupport(
+ const std::string& key_system) const = 0;
+
+ // Returns the support |key_system| provides for persistent-release-message
+ // sessions.
+ virtual EmeSessionTypeSupport GetPersistentReleaseMessageSessionSupport(
+ const std::string& key_system) const = 0;
+
+ // Returns the support |key_system| provides for persistent state.
+ virtual EmeFeatureSupport GetPersistentStateSupport(
+ const std::string& key_system) const = 0;
+
+ // Returns the support |key_system| provides for distinctive identifiers.
+ virtual EmeFeatureSupport GetDistinctiveIdentifierSupport(
+ const std::string& key_system) const = 0;
+
+ protected:
+ virtual ~KeySystems() {};
+};
+
+// Prefixed EME API only supports prefixed (webkit-) key system name for
+// certain key systems. But internally only unprefixed key systems are
+// supported. The following two functions help convert between prefixed and
+// unprefixed key system names.
+
+// Gets the unprefixed key system name for |key_system|.
+MEDIA_EXPORT std::string GetUnprefixedKeySystemName(
+ const std::string& key_system);
+
+// Gets the prefixed key system name for |key_system|.
+MEDIA_EXPORT std::string GetPrefixedKeySystemName(
+ const std::string& key_system);
+
+// Use for unprefixed EME only!
+// Returns whether |key_system| is a supported key system.
+// Note: Shouldn't be used for the prefixed API; use the Prefixed* query
+// functions declared below instead.
+MEDIA_EXPORT bool IsSupportedKeySystem(const std::string& key_system);
+
+// Use for prefixed EME only!
+MEDIA_EXPORT bool IsSupportedKeySystemWithInitDataType(
+ const std::string& key_system,
+ EmeInitDataType init_data_type);
+
+// Use for prefixed EME only!
+// Returns whether |key_system| is a real supported key system that can be
+// instantiated.
+// Abstract parent |key_system| strings will return false.
+MEDIA_EXPORT bool PrefixedIsSupportedConcreteKeySystem(
+ const std::string& key_system);
+
+// Use for prefixed EME only!
+// Returns whether |key_system| supports the specified media type and codec(s).
+// To be used with prefixed EME only as it generates UMAs based on the query.
+MEDIA_EXPORT bool PrefixedIsSupportedKeySystemWithMediaMimeType(
+ const std::string& mime_type,
+ const std::vector<std::string>& codecs,
+ const std::string& key_system);
+
+// Returns a name for |key_system| suitable to UMA logging.
+MEDIA_EXPORT std::string GetKeySystemNameForUMA(const std::string& key_system);
+
+// Returns whether AesDecryptor can be used for the given |concrete_key_system|.
+MEDIA_EXPORT bool CanUseAesDecryptor(const std::string& concrete_key_system);
+
+#if defined(ENABLE_PEPPER_CDMS)
+// Returns the Pepper MIME type for |concrete_key_system|.
+// Returns empty string if |concrete_key_system| is unknown or not Pepper-based.
+MEDIA_EXPORT std::string GetPepperType(
+ const std::string& concrete_key_system);
+#endif
+
+#if defined(UNIT_TEST)
+// Helper functions to add container/codec types for testing purposes.
+MEDIA_EXPORT void AddContainerMask(const std::string& container, uint32 mask);
+MEDIA_EXPORT void AddCodecMask(
+ EmeMediaType media_type,
+ const std::string& codec,
+ uint32 mask);
+#endif // defined(UNIT_TEST)
+
+} // namespace media
+
+#endif // MEDIA_BASE_KEY_SYSTEMS_H_
diff --git a/chromium/media/base/key_systems_support_uma.cc b/chromium/media/base/key_systems_support_uma.cc
new file mode 100644
index 00000000000..60070263e0b
--- /dev/null
+++ b/chromium/media/base/key_systems_support_uma.cc
@@ -0,0 +1,134 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/key_systems_support_uma.h"
+
+
+#include "base/metrics/histogram.h"
+#include "media/base/key_systems.h"
+
+namespace media {
+
namespace {

// Histogram name prefix; the queried key system's UMA name is appended.
const char kKeySystemSupportUMAPrefix[] = "Media.EME.KeySystemSupport.";

// These values are reported to UMA. Do not change the existing values!
enum KeySystemSupportStatus {
  KEY_SYSTEM_QUERIED = 0,
  KEY_SYSTEM_SUPPORTED = 1,
  KEY_SYSTEM_WITH_TYPE_QUERIED = 2,
  KEY_SYSTEM_WITH_TYPE_SUPPORTED = 3,
  KEY_SYSTEM_SUPPORT_STATUS_COUNT
};

// Reports an event only once.
class OneTimeReporter {
 public:
  OneTimeReporter(const std::string& key_system, KeySystemSupportStatus status);
  ~OneTimeReporter();

  // Records |status_| into the key system's histogram on the first call;
  // subsequent calls are no-ops.
  void Report();

 private:
  bool is_reported_;
  const std::string key_system_;
  const KeySystemSupportStatus status_;
};

OneTimeReporter::OneTimeReporter(const std::string& key_system,
                                 KeySystemSupportStatus status)
    : is_reported_(false), key_system_(key_system), status_(status) {
}

OneTimeReporter::~OneTimeReporter() {}

void OneTimeReporter::Report() {
  if (is_reported_)
    return;

  // Not using UMA_HISTOGRAM_ENUMERATION directly because UMA_* macros require
  // the names to be constant throughout the process' lifetime.
  base::LinearHistogram::FactoryGet(
      kKeySystemSupportUMAPrefix + GetKeySystemNameForUMA(key_system_), 1,
      KEY_SYSTEM_SUPPORT_STATUS_COUNT, KEY_SYSTEM_SUPPORT_STATUS_COUNT + 1,
      base::Histogram::kUmaTargetedHistogramFlag)->Add(status_);

  is_reported_ = true;
}

}  // namespace
+
// Per-key-system bundle of the four one-time UMA reports: queried,
// queried-with-type, supported, and supported-with-type.
class KeySystemsSupportUMA::Reporter {
 public:
  explicit Reporter(const std::string& key_system);
  ~Reporter();

  // Fires the query report(s); also fires the support report(s) when
  // |is_supported|. |has_type| additionally fires the with-type variants.
  void Report(bool has_type, bool is_supported);

 private:
  const std::string key_system_;

  OneTimeReporter call_reporter_;
  OneTimeReporter call_with_type_reporter_;
  OneTimeReporter support_reporter_;
  OneTimeReporter support_with_type_reporter_;
};
+
// Each OneTimeReporter is bound to its status value up front, so Report()
// only decides which ones to fire.
KeySystemsSupportUMA::Reporter::Reporter(const std::string& key_system)
    : key_system_(key_system),
      call_reporter_(key_system, KEY_SYSTEM_QUERIED),
      call_with_type_reporter_(key_system, KEY_SYSTEM_WITH_TYPE_QUERIED),
      support_reporter_(key_system, KEY_SYSTEM_SUPPORTED),
      support_with_type_reporter_(key_system, KEY_SYSTEM_WITH_TYPE_SUPPORTED) {}
+
+KeySystemsSupportUMA::Reporter::~Reporter() {}
+
+void KeySystemsSupportUMA::Reporter::Report(bool has_type, bool is_supported) {
+ call_reporter_.Report();
+ if (has_type)
+ call_with_type_reporter_.Report();
+
+ if (!is_supported)
+ return;
+
+ support_reporter_.Report();
+ if (has_type)
+ support_with_type_reporter_.Report();
+}
+
+KeySystemsSupportUMA::KeySystemsSupportUMA() {}
+
+KeySystemsSupportUMA::~KeySystemsSupportUMA() {}
+
// Registers |key_system| for UMA reporting; must not already be registered.
// |reporters_| (a scoped_ptr_hash_map) takes ownership of the new Reporter.
void KeySystemsSupportUMA::AddKeySystemToReport(const std::string& key_system) {
  DCHECK(!GetReporter(key_system));
  reporters_.set(key_system, scoped_ptr<Reporter>(new Reporter(key_system)));
}
+
+void KeySystemsSupportUMA::ReportKeySystemQuery(const std::string& key_system,
+ bool has_type) {
+ Reporter* reporter = GetReporter(key_system);
+ if (!reporter)
+ return;
+ reporter->Report(has_type, false);
+}
+
+void KeySystemsSupportUMA::ReportKeySystemSupport(const std::string& key_system,
+ bool has_type) {
+ Reporter* reporter = GetReporter(key_system);
+ if (!reporter)
+ return;
+ reporter->Report(has_type, true);
+}
+
+KeySystemsSupportUMA::Reporter* KeySystemsSupportUMA::GetReporter(
+ const std::string& key_system) {
+ Reporters::iterator reporter = reporters_.find(key_system);
+ if (reporter == reporters_.end())
+ return NULL;
+ return reporter->second;
+}
+
+} // namespace media
diff --git a/chromium/media/base/key_systems_support_uma.h b/chromium/media/base/key_systems_support_uma.h
new file mode 100644
index 00000000000..28f334db995
--- /dev/null
+++ b/chromium/media/base/key_systems_support_uma.h
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_KEY_SYSTEMS_SUPPORT_UMA_H_
+#define MEDIA_BASE_KEY_SYSTEMS_SUPPORT_UMA_H_
+
+#include <string>
+
+#include "base/containers/scoped_ptr_hash_map.h"
+
+namespace media {
+
+// Key system support UMA statistics for queried key systems.
+// 1. The key system is queried (with or without a MIME type).
+// 2. The key system is queried with a MIME type.
+// 3. The queried key system is supported (with or without a MIME type). This is
+// reported when the key system is supported when queried, regardless of
+// whether a MIME type is specified.
+// 4. The queried key system is supported with a MIME type. This is reported
+// when the key system is supported when queried with a MIME type
+// specified.
+// Note: All 4 stats are only reported once per renderer process per key system.
+class KeySystemsSupportUMA {
+ public:
+ KeySystemsSupportUMA();
+ ~KeySystemsSupportUMA();
+
+ // Adds a |key_system| for which query/support statistics are reported.
+ // If you use this function to add key system to report, make sure to update
+ // AddKeySystemSupportActions() in tools/metrics/actions/extract_actions.py.
+ void AddKeySystemToReport(const std::string& key_system);
+
+ // Reports that the |key_system| is queried. When |has_type|, also reports
+ // that the |key_system| with a MIME type is queried.
+ void ReportKeySystemQuery(const std::string& key_system, bool has_type);
+
+ // Reports that the queried |key_system| is supported. When |has_type| (a
+ // MIME type is specified in the query), also reports that the queried
+ // |key_system| is supported with that MIME type.
+ void ReportKeySystemSupport(const std::string& key_system, bool has_type);
+
+ private:
+ class Reporter;
+
+ // Returns the Reporter for |key_system|. Returns NULL if |key_system| was not
+ // added for UMA reporting.
+ Reporter* GetReporter(const std::string& key_system);
+
+ // Key system <-> Reporter map.
+ typedef base::ScopedPtrHashMap<std::string, scoped_ptr<Reporter>> Reporters;
+ Reporters reporters_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_KEY_SYSTEMS_SUPPORT_UMA_H_
diff --git a/chromium/media/base/key_systems_unittest.cc b/chromium/media/base/key_systems_unittest.cc
new file mode 100644
index 00000000000..1a294889258
--- /dev/null
+++ b/chromium/media/base/key_systems_unittest.cc
@@ -0,0 +1,849 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(sandersd): Refactor to remove recomputed codec arrays, and generally
+// shorten and improve coverage.
+// - http://crbug.com/417444
+// - http://crbug.com/457438
+// TODO(sandersd): Add tests to cover codec vectors with empty items.
+// http://crbug.com/417461
+
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "media/base/eme_constants.h"
+#include "media/base/key_system_info.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_client.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests are not always available, including on Android.
+// EXPECT_DEBUG_DEATH_PORTABLE executes tests correctly except in the case that
+// death tests are not available and NDEBUG is not defined.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DEBUG_DEATH_PORTABLE(statement, regex) \
+ EXPECT_DEBUG_DEATH(statement, regex)
+#else
+#if defined(NDEBUG)
+#define EXPECT_DEBUG_DEATH_PORTABLE(statement, regex) \
+ do { statement; } while (false)
+#else
+#include "base/logging.h"
+#define EXPECT_DEBUG_DEATH_PORTABLE(statement, regex) \
+ LOG(WARNING) << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified.";
+#endif // defined(NDEBUG)
+#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+namespace media {
+
+// These are the (fake) key systems that are registered for these tests.
+// kUsesAes uses the AesDecryptor like Clear Key.
+// kExternal uses an external CDM, such as Pepper-based or Android platform CDM.
+const char kUsesAes[] = "x-org.example.clear";
+const char kUsesAesParent[] = "x-org.example"; // Not registered.
+const char kUseAesNameForUMA[] = "UseAes";
+const char kExternal[] = "x-com.example.test";
+const char kExternalParent[] = "x-com.example";
+const char kExternalNameForUMA[] = "External";
+
+const char kClearKey[] = "org.w3.clearkey";
+const char kPrefixedClearKey[] = "webkit-org.w3.clearkey";
+const char kExternalClearKey[] = "org.chromium.externalclearkey";
+
+const char kAudioWebM[] = "audio/webm";
+const char kVideoWebM[] = "video/webm";
+const char kAudioFoo[] = "audio/foo";
+const char kVideoFoo[] = "video/foo";
+
+// Pick some arbitrary bit fields as long as they are not in conflict with the
+// real ones.
+enum TestCodec {
+ TEST_CODEC_FOO_AUDIO = 1 << 10, // An audio codec for foo container.
+ TEST_CODEC_FOO_AUDIO_ALL = TEST_CODEC_FOO_AUDIO,
+ TEST_CODEC_FOO_VIDEO = 1 << 11, // A video codec for foo container.
+ TEST_CODEC_FOO_VIDEO_ALL = TEST_CODEC_FOO_VIDEO,
+ TEST_CODEC_FOO_ALL = TEST_CODEC_FOO_AUDIO_ALL | TEST_CODEC_FOO_VIDEO_ALL
+};
+
+static_assert((TEST_CODEC_FOO_ALL & EME_CODEC_ALL) == EME_CODEC_NONE,
+ "test codec masks should only use invalid codec masks");
+
+// Adapt IsSupportedKeySystemWithMediaMimeType() to the new API,
+// IsSupportedCodecCombination().
+static bool IsSupportedKeySystemWithMediaMimeType(
+ const std::string& mime_type,
+ const std::vector<std::string>& codecs,
+ const std::string& key_system) {
+ return (KeySystems::GetInstance()->GetContentTypeConfigRule(
+ key_system, EmeMediaType::VIDEO, mime_type, codecs) !=
+ EmeConfigRule::NOT_SUPPORTED);
+}
+
+static bool IsSupportedKeySystemWithAudioMimeType(
+ const std::string& mime_type,
+ const std::vector<std::string>& codecs,
+ const std::string& key_system) {
+ return (KeySystems::GetInstance()->GetContentTypeConfigRule(
+ key_system, EmeMediaType::AUDIO, mime_type, codecs) !=
+ EmeConfigRule::NOT_SUPPORTED);
+}
+
+// Adds test container and codec masks.
+// This function must be called after SetMediaClient() if a MediaClient will be
+// provided.
+// More details: AddXxxMask() will create KeySystems if it hasn't been created.
+// During KeySystems's construction GetMediaClient() will be used to add key
+// systems. In test code, the MediaClient is set by SetMediaClient().
+// Therefore, SetMediaClient() must be called before this function to make sure
+// MediaClient in effect when constructing KeySystems.
+static void AddContainerAndCodecMasksForTest() {
+ // Since KeySystems is a singleton, make sure we only add test container and
+ // codec masks once per process.
+ static bool is_test_masks_added = false;
+
+ if (is_test_masks_added)
+ return;
+
+ AddContainerMask("audio/foo", TEST_CODEC_FOO_AUDIO_ALL);
+ AddContainerMask("video/foo", TEST_CODEC_FOO_ALL);
+ AddCodecMask(EmeMediaType::AUDIO, "fooaudio", TEST_CODEC_FOO_AUDIO);
+ AddCodecMask(EmeMediaType::VIDEO, "foovideo", TEST_CODEC_FOO_VIDEO);
+
+ is_test_masks_added = true;
+}
+
+class TestMediaClient : public MediaClient {
+ public:
+ TestMediaClient();
+ ~TestMediaClient() override;
+
+ // MediaClient implementation.
+ void AddKeySystemsInfoForUMA(
+ std::vector<KeySystemInfoForUMA>* key_systems_info_for_uma) final;
+ bool IsKeySystemsUpdateNeeded() final;
+ void AddSupportedKeySystems(
+ std::vector<KeySystemInfo>* key_systems_info) override;
+
+ // Helper function to test the case where IsKeySystemsUpdateNeeded() is true
+ // after AddSupportedKeySystems() is called.
+ void SetKeySystemsUpdateNeeded();
+
+ // Helper function to disable "kExternal" key system support so that we can
+ // test the key system update case.
+ void DisableExternalKeySystemSupport();
+
+ protected:
+ void AddUsesAesKeySystem(const std::string& name,
+ std::vector<KeySystemInfo>* key_systems_info);
+ void AddExternalKeySystem(
+ std::vector<KeySystemInfo>* key_systems_info);
+
+ private:
+ bool is_update_needed_;
+ bool supports_external_key_system_;
+};
+
+TestMediaClient::TestMediaClient()
+ : is_update_needed_(true), supports_external_key_system_(true) {
+}
+
+TestMediaClient::~TestMediaClient() {
+}
+
+void TestMediaClient::AddKeySystemsInfoForUMA(
+ std::vector<KeySystemInfoForUMA>* key_systems_info_for_uma) {
+ key_systems_info_for_uma->push_back(
+ media::KeySystemInfoForUMA(kUsesAes, kUseAesNameForUMA, false));
+ key_systems_info_for_uma->push_back(
+ media::KeySystemInfoForUMA(kExternal, kExternalNameForUMA, true));
+}
+
+bool TestMediaClient::IsKeySystemsUpdateNeeded() {
+ return is_update_needed_;
+}
+
+void TestMediaClient::AddSupportedKeySystems(
+ std::vector<KeySystemInfo>* key_systems) {
+ DCHECK(is_update_needed_);
+
+ AddUsesAesKeySystem(kUsesAes, key_systems);
+
+ if (supports_external_key_system_)
+ AddExternalKeySystem(key_systems);
+
+ is_update_needed_ = false;
+}
+
+void TestMediaClient::SetKeySystemsUpdateNeeded() {
+ is_update_needed_ = true;
+}
+
+void TestMediaClient::DisableExternalKeySystemSupport() {
+ supports_external_key_system_ = false;
+}
+
+void TestMediaClient::AddUsesAesKeySystem(
+ const std::string& name,
+ std::vector<KeySystemInfo>* key_systems) {
+ KeySystemInfo system;
+ system.key_system = name;
+ system.supported_codecs = EME_CODEC_WEBM_ALL;
+ system.supported_codecs |= TEST_CODEC_FOO_ALL;
+ system.supported_init_data_types = kInitDataTypeMaskWebM;
+ system.max_audio_robustness = EmeRobustness::EMPTY;
+ system.max_video_robustness = EmeRobustness::EMPTY;
+ system.persistent_license_support = EmeSessionTypeSupport::NOT_SUPPORTED;
+ system.persistent_release_message_support =
+ EmeSessionTypeSupport::NOT_SUPPORTED;
+ system.persistent_state_support = EmeFeatureSupport::NOT_SUPPORTED;
+ system.distinctive_identifier_support = EmeFeatureSupport::NOT_SUPPORTED;
+ system.use_aes_decryptor = true;
+ key_systems->push_back(system);
+}
+
+void TestMediaClient::AddExternalKeySystem(
+ std::vector<KeySystemInfo>* key_systems) {
+ KeySystemInfo ext;
+ ext.key_system = kExternal;
+ ext.supported_codecs = EME_CODEC_WEBM_ALL;
+ ext.supported_codecs |= TEST_CODEC_FOO_ALL;
+ ext.supported_init_data_types = kInitDataTypeMaskWebM;
+ ext.max_audio_robustness = EmeRobustness::EMPTY;
+ ext.max_video_robustness = EmeRobustness::EMPTY;
+ ext.persistent_license_support = EmeSessionTypeSupport::SUPPORTED;
+ ext.persistent_release_message_support = EmeSessionTypeSupport::NOT_SUPPORTED;
+ ext.persistent_state_support = EmeFeatureSupport::ALWAYS_ENABLED;
+ ext.distinctive_identifier_support = EmeFeatureSupport::ALWAYS_ENABLED;
+ ext.parent_key_system = kExternalParent;
+#if defined(ENABLE_PEPPER_CDMS)
+ ext.pepper_type = "application/x-ppapi-external-cdm";
+#endif // defined(ENABLE_PEPPER_CDMS)
+ key_systems->push_back(ext);
+}
+
+class PotentiallySupportedNamesTestMediaClient : public TestMediaClient {
+ void AddSupportedKeySystems(
+ std::vector<KeySystemInfo>* key_systems_info) final;
+};
+
+void PotentiallySupportedNamesTestMediaClient::AddSupportedKeySystems(
+ std::vector<KeySystemInfo>* key_systems) {
+ // org.w3.clearkey is automatically registered.
+ AddUsesAesKeySystem("com.widevine.alpha", key_systems);
+ AddUsesAesKeySystem("org.chromium.externalclearkey", key_systems);
+ AddUsesAesKeySystem("org.chromium.externalclearkey.something", key_systems);
+ AddUsesAesKeySystem("com.chromecast.something", key_systems);
+ AddUsesAesKeySystem("x-something", key_systems);
+}
+
+class KeySystemsPotentiallySupportedNamesTest : public testing::Test {
+ protected:
+ KeySystemsPotentiallySupportedNamesTest() {
+ SetMediaClient(&test_media_client_);
+ }
+
+ ~KeySystemsPotentiallySupportedNamesTest() override {
+ // Clear the use of |test_media_client_|, which was set in the constructor.
+ SetMediaClient(nullptr);
+ }
+
+ private:
+ PotentiallySupportedNamesTestMediaClient test_media_client_;
+};
+
+class KeySystemsTest : public testing::Test {
+ protected:
+ KeySystemsTest() {
+ vp8_codec_.push_back("vp8");
+
+ vp80_codec_.push_back("vp8.0");
+
+ vp9_codec_.push_back("vp9");
+
+ vp90_codec_.push_back("vp9.0");
+
+ vorbis_codec_.push_back("vorbis");
+
+ vp8_and_vorbis_codecs_.push_back("vp8");
+ vp8_and_vorbis_codecs_.push_back("vorbis");
+
+ vp9_and_vorbis_codecs_.push_back("vp9");
+ vp9_and_vorbis_codecs_.push_back("vorbis");
+
+ foovideo_codec_.push_back("foovideo");
+
+ foovideo_extended_codec_.push_back("foovideo.4D400C");
+
+ foovideo_dot_codec_.push_back("foovideo.");
+
+ fooaudio_codec_.push_back("fooaudio");
+
+ foovideo_and_fooaudio_codecs_.push_back("foovideo");
+ foovideo_and_fooaudio_codecs_.push_back("fooaudio");
+
+ unknown_codec_.push_back("unknown");
+
+ mixed_codecs_.push_back("vorbis");
+ mixed_codecs_.push_back("foovideo");
+
+ SetMediaClient(&test_media_client_);
+ }
+
+ void SetUp() override {
+ AddContainerAndCodecMasksForTest();
+ }
+
+ ~KeySystemsTest() override {
+ // Clear the use of |test_media_client_|, which was set in the constructor.
+ SetMediaClient(nullptr);
+ }
+
+ void UpdateClientKeySystems() {
+ test_media_client_.SetKeySystemsUpdateNeeded();
+ test_media_client_.DisableExternalKeySystemSupport();
+ }
+
+ typedef std::vector<std::string> CodecVector;
+
+ const CodecVector& no_codecs() const { return no_codecs_; }
+
+ const CodecVector& vp8_codec() const { return vp8_codec_; }
+ const CodecVector& vp80_codec() const { return vp80_codec_; }
+ const CodecVector& vp9_codec() const { return vp9_codec_; }
+ const CodecVector& vp90_codec() const { return vp90_codec_; }
+
+ const CodecVector& vorbis_codec() const { return vorbis_codec_; }
+
+ const CodecVector& vp8_and_vorbis_codecs() const {
+ return vp8_and_vorbis_codecs_;
+ }
+ const CodecVector& vp9_and_vorbis_codecs() const {
+ return vp9_and_vorbis_codecs_;
+ }
+
+ const CodecVector& foovideo_codec() const { return foovideo_codec_; }
+ const CodecVector& foovideo_extended_codec() const {
+ return foovideo_extended_codec_;
+ }
+ const CodecVector& foovideo_dot_codec() const { return foovideo_dot_codec_; }
+ const CodecVector& fooaudio_codec() const { return fooaudio_codec_; }
+ const CodecVector& foovideo_and_fooaudio_codecs() const {
+ return foovideo_and_fooaudio_codecs_;
+ }
+
+ const CodecVector& unknown_codec() const { return unknown_codec_; }
+
+ const CodecVector& mixed_codecs() const { return mixed_codecs_; }
+
+ private:
+ const CodecVector no_codecs_;
+ CodecVector vp8_codec_;
+ CodecVector vp80_codec_;
+ CodecVector vp9_codec_;
+ CodecVector vp90_codec_;
+ CodecVector vorbis_codec_;
+ CodecVector vp8_and_vorbis_codecs_;
+ CodecVector vp9_and_vorbis_codecs_;
+
+ CodecVector foovideo_codec_;
+ CodecVector foovideo_extended_codec_;
+ CodecVector foovideo_dot_codec_;
+ CodecVector fooaudio_codec_;
+ CodecVector foovideo_and_fooaudio_codecs_;
+
+ CodecVector unknown_codec_;
+
+ CodecVector mixed_codecs_;
+
+ TestMediaClient test_media_client_;
+};
+
+// TODO(ddorwin): Consider moving GetPepperType() calls out to their own test.
+
+TEST_F(KeySystemsTest, EmptyKeySystem) {
+ EXPECT_FALSE(IsSupportedKeySystem(std::string()));
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(std::string()));
+}
+
+// Clear Key is the only key system registered in content.
+TEST_F(KeySystemsTest, ClearKey) {
+ EXPECT_TRUE(IsSupportedKeySystem(kClearKey));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kClearKey));
+
+ EXPECT_EQ("ClearKey", GetKeySystemNameForUMA(kClearKey));
+
+ // Prefixed Clear Key is not supported internally.
+ EXPECT_FALSE(IsSupportedKeySystem(kPrefixedClearKey));
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kPrefixedClearKey));
+}
+
+TEST_F(KeySystemsTest, ClearKeyWithInitDataType) {
+ EXPECT_TRUE(IsSupportedKeySystem(kClearKey));
+ EXPECT_TRUE(
+ IsSupportedKeySystemWithInitDataType(kClearKey, EmeInitDataType::WEBM));
+ EXPECT_TRUE(
+ IsSupportedKeySystemWithInitDataType(kClearKey, EmeInitDataType::KEYIDS));
+
+ // All other InitDataTypes are not supported.
+ EXPECT_FALSE(IsSupportedKeySystemWithInitDataType(kClearKey,
+ EmeInitDataType::UNKNOWN));
+}
+
+// The key system is not registered and therefore is unrecognized.
+TEST_F(KeySystemsTest, Basic_UnrecognizedKeySystem) {
+ static const char* const kUnrecognized = "x-org.example.unrecognized";
+
+ EXPECT_FALSE(IsSupportedKeySystem(kUnrecognized));
+
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kUnrecognized));
+
+ bool can_use = false;
+ EXPECT_DEBUG_DEATH_PORTABLE(
+ can_use = CanUseAesDecryptor(kUnrecognized),
+ "x-org.example.unrecognized is not a known concrete system");
+ EXPECT_FALSE(can_use);
+
+#if defined(ENABLE_PEPPER_CDMS)
+ std::string type;
+ EXPECT_DEBUG_DEATH(
+ type = GetPepperType(kUnrecognized),
+ "x-org.example.unrecognized is not a known concrete system");
+ EXPECT_TRUE(type.empty());
+#endif
+}
+
+TEST_F(KeySystemsTest, Basic_UsesAesDecryptor) {
+ EXPECT_TRUE(IsSupportedKeySystem(kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kUsesAes));
+
+ // No UMA value for this test key system.
+ EXPECT_EQ("UseAes", GetKeySystemNameForUMA(kUsesAes));
+
+ EXPECT_TRUE(CanUseAesDecryptor(kUsesAes));
+#if defined(ENABLE_PEPPER_CDMS)
+ std::string type;
+ EXPECT_DEBUG_DEATH(type = GetPepperType(kUsesAes),
+ "x-org.example.clear is not Pepper-based");
+ EXPECT_TRUE(type.empty());
+#endif
+}
+
+TEST_F(KeySystemsTest,
+ IsSupportedKeySystemWithMediaMimeType_UsesAesDecryptor_TypesContainer1) {
+ // Valid video types.
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_codec(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp80_codec(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kUsesAes));
+
+ // Audio in a video container.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_and_vorbis_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vorbis_codec(), kUsesAes));
+
+ // Non-Webm codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, foovideo_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, unknown_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, mixed_codecs(), kUsesAes));
+
+ // Valid audio types.
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, no_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vorbis_codec(), kUsesAes));
+
+ // Non-audio codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp8_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp8_and_vorbis_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_and_vorbis_codecs(), kUsesAes));
+
+ // Non-Webm codec.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, fooaudio_codec(), kUsesAes));
+}
+
+// No parent is registered for UsesAes.
+TEST_F(KeySystemsTest, Parent_NoParentRegistered) {
+ EXPECT_FALSE(IsSupportedKeySystem(kUsesAesParent));
+
+ // The parent is not supported for most things.
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kUsesAesParent));
+ bool result = false;
+ EXPECT_DEBUG_DEATH_PORTABLE(result = CanUseAesDecryptor(kUsesAesParent),
+ "x-org.example is not a known concrete system");
+ EXPECT_FALSE(result);
+#if defined(ENABLE_PEPPER_CDMS)
+ std::string type;
+ EXPECT_DEBUG_DEATH(type = GetPepperType(kUsesAesParent),
+ "x-org.example is not a known concrete system");
+ EXPECT_TRUE(type.empty());
+#endif
+}
+
+TEST_F(KeySystemsTest, IsSupportedKeySystem_InvalidVariants) {
+ // Case sensitive.
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example.ClEaR"));
+
+ // TLDs are not allowed.
+ EXPECT_FALSE(IsSupportedKeySystem("org."));
+ EXPECT_FALSE(IsSupportedKeySystem("com"));
+
+ // Extra period.
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example.clear."));
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example."));
+
+ // Incomplete.
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example.clea"));
+
+ // Extra character.
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example.clearz"));
+
+ // There are no child key systems for UsesAes.
+ EXPECT_FALSE(IsSupportedKeySystem("x-org.example.clear.foo"));
+}
+
+TEST_F(KeySystemsTest, IsSupportedKeySystemWithMediaMimeType_NoType) {
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ std::string(), no_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ std::string(), no_codecs(), kUsesAesParent));
+
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(std::string(), no_codecs(),
+ "x-org.example.foo"));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ std::string(), no_codecs(), "x-org.example.clear.foo"));
+}
+
+// Tests the second registered container type.
+// TODO(ddorwin): Combined with TypesContainer1 in a future CL.
+TEST_F(KeySystemsTest,
+ IsSupportedKeySystemWithMediaMimeType_UsesAesDecryptor_TypesContainer2) {
+ // Valid video types.
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, no_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_codec(), kUsesAes));
+
+ // Audio in a video container.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_and_fooaudio_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, fooaudio_codec(), kUsesAes));
+
+ // Extended codecs fail because this is handled by SimpleWebMimeRegistryImpl.
+ // They should really pass canPlayType().
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_extended_codec(), kUsesAes));
+
+ // Invalid codec format.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_dot_codec(), kUsesAes));
+
+ // Non-container2 codec.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, vp8_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, unknown_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, mixed_codecs(), kUsesAes));
+
+ // Valid audio types.
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, no_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, fooaudio_codec(), kUsesAes));
+
+ // Non-audio codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, foovideo_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, foovideo_and_fooaudio_codecs(), kUsesAes));
+
+ // Non-container2 codec.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, vorbis_codec(), kUsesAes));
+}
+
+//
+// Non-AesDecryptor-based key system.
+//
+
+TEST_F(KeySystemsTest, Basic_ExternalDecryptor) {
+ EXPECT_TRUE(IsSupportedKeySystem(kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternal));
+
+ EXPECT_FALSE(CanUseAesDecryptor(kExternal));
+#if defined(ENABLE_PEPPER_CDMS)
+ EXPECT_EQ("application/x-ppapi-external-cdm", GetPepperType(kExternal));
+#endif // defined(ENABLE_PEPPER_CDMS)
+}
+
+TEST_F(KeySystemsTest, Parent_ParentRegistered) {
+ // Unprefixed has no parent key system support.
+ EXPECT_FALSE(IsSupportedKeySystem(kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternalParent));
+
+ // The parent is not supported for most things.
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kExternalParent));
+ bool result = false;
+ EXPECT_DEBUG_DEATH_PORTABLE(result = CanUseAesDecryptor(kExternalParent),
+ "x-com.example is not a known concrete system");
+ EXPECT_FALSE(result);
+#if defined(ENABLE_PEPPER_CDMS)
+ std::string type;
+ EXPECT_DEBUG_DEATH(type = GetPepperType(kExternalParent),
+ "x-com.example is not a known concrete system");
+ EXPECT_TRUE(type.empty());
+#endif
+}
+
+TEST_F(
+ KeySystemsTest,
+ IsSupportedKeySystemWithMediaMimeType_ExternalDecryptor_TypesContainer1) {
+ // Valid video types.
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_codec(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp80_codec(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kExternal));
+
+ // Audio in a video container.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_and_vorbis_codecs(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vorbis_codec(), kExternal));
+
+ // Valid video types - parent key system.
+ // Prefixed has parent key system support.
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_codec(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp80_codec(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp8_and_vorbis_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vorbis_codec(), kExternalParent));
+
+ // Non-Webm codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, foovideo_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, unknown_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, mixed_codecs(), kExternal));
+
+ // Valid audio types.
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, no_codecs(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vorbis_codec(), kExternal));
+
+ // Valid audio types - parent key system.
+ // Prefixed has parent key system support.
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, no_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vorbis_codec(), kExternalParent));
+
+ // Non-audio codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vp8_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vp8_and_vorbis_codecs(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vp9_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, vp9_and_vorbis_codecs(), kExternal));
+
+ // Non-Webm codec.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioWebM, fooaudio_codec(), kExternal));
+}
+
+TEST_F(
+ KeySystemsTest,
+ IsSupportedKeySystemWithMediaMimeType_ExternalDecryptor_TypesContainer2) {
+ // Valid video types.
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, no_codecs(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_codec(), kExternal));
+
+ // Audio in a video container.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_and_fooaudio_codecs(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, fooaudio_codec(), kExternal));
+
+ // Valid video types - parent key system.
+ // Prefixed has parent key system support.
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, no_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_codec(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_and_fooaudio_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, fooaudio_codec(), kExternalParent));
+
+ // Extended codecs fail because this is handled by SimpleWebMimeRegistryImpl.
+ // They should really pass canPlayType().
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_extended_codec(), kExternal));
+
+ // Invalid codec format.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, foovideo_dot_codec(), kExternal));
+
+ // Non-container2 codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, vp8_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, unknown_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoFoo, mixed_codecs(), kExternal));
+
+ // Valid audio types.
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, no_codecs(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, fooaudio_codec(), kExternal));
+
+ // Valid audio types - parent key system.
+ // Prefixed has parent key system support.
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kAudioFoo, no_codecs(), kExternalParent));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kAudioFoo, fooaudio_codec(), kExternalParent));
+
+ // Non-audio codecs.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, foovideo_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, foovideo_and_fooaudio_codecs(), kExternal));
+
+ // Non-container2 codec.
+ EXPECT_FALSE(IsSupportedKeySystemWithAudioMimeType(
+ kAudioFoo, vorbis_codec(), kExternal));
+}
+
+TEST_F(KeySystemsTest, KeySystemNameForUMA) {
+ EXPECT_EQ("ClearKey", GetKeySystemNameForUMA(kClearKey));
+ // Prefixed is not supported internally.
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kPrefixedClearKey));
+
+ // External Clear Key never has a UMA name.
+ EXPECT_EQ("Unknown", GetKeySystemNameForUMA(kExternalClearKey));
+}
+
+TEST_F(KeySystemsTest, KeySystemsUpdate) {
+ EXPECT_TRUE(IsSupportedKeySystem(kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystem(kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternal));
+
+ UpdateClientKeySystems();
+
+ EXPECT_TRUE(IsSupportedKeySystem(kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystem(kExternal));
+}
+
+TEST_F(KeySystemsTest, PrefixedKeySystemsUpdate) {
+ EXPECT_TRUE(IsSupportedKeySystem(kUsesAes));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystem(kExternal));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternal));
+
+ UpdateClientKeySystems();
+
+ EXPECT_TRUE(IsSupportedKeySystem(kUsesAes));
+ EXPECT_TRUE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystem(kExternal));
+ EXPECT_FALSE(PrefixedIsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), kExternal));
+}
+
+TEST_F(KeySystemsPotentiallySupportedNamesTest, PotentiallySupportedNames) {
+ EXPECT_FALSE(IsSupportedKeySystem("org.w3"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.w3."));
+ EXPECT_FALSE(IsSupportedKeySystem("org.w3.clearke"));
+ EXPECT_TRUE(IsSupportedKeySystem("org.w3.clearkey"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.w3.clearkeys"));
+
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine."));
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine.alph"));
+ EXPECT_TRUE(IsSupportedKeySystem("com.widevine.alpha"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine.beta"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine.alphabeta"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.widevine.alpha.beta"));
+
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium."));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium.externalclearke"));
+ EXPECT_TRUE(IsSupportedKeySystem("org.chromium.externalclearkey"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium.externalclearkeys"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium.externalclearkey."));
+ EXPECT_TRUE(IsSupportedKeySystem("org.chromium.externalclearkey.something"));
+ EXPECT_FALSE(
+ IsSupportedKeySystem("org.chromium.externalclearkey.something.else"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium.externalclearkey.other"));
+ EXPECT_FALSE(IsSupportedKeySystem("org.chromium.other"));
+
+ EXPECT_FALSE(IsSupportedKeySystem("com.chromecast"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.chromecast."));
+ EXPECT_TRUE(IsSupportedKeySystem("com.chromecast.something"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.chromecast.something.else"));
+ EXPECT_FALSE(IsSupportedKeySystem("com.chromecast.other"));
+
+ EXPECT_FALSE(IsSupportedKeySystem("x-"));
+ EXPECT_TRUE(IsSupportedKeySystem("x-something"));
+ EXPECT_FALSE(IsSupportedKeySystem("x-something.else"));
+ EXPECT_FALSE(IsSupportedKeySystem("x-other"));
+}
+
+} // namespace media
diff --git a/chromium/media/base/limits.h b/chromium/media/base/limits.h
index 6ab19537c19..c3cd6777d6c 100644
--- a/chromium/media/base/limits.h
+++ b/chromium/media/base/limits.h
@@ -48,10 +48,13 @@ enum {
// lengths are somewhat arbitrary as the EME spec doesn't specify any limits.
kMinCertificateLength = 128,
kMaxCertificateLength = 16 * 1024,
- kMaxWebSessionIdLength = 512,
+ kMaxSessionIdLength = 512,
kMinKeyIdLength = 1,
kMaxKeyIdLength = 512,
kMaxKeyIds = 128,
+ kMaxInitDataLength = 64 * 1024, // 64 KB
+ kMaxSessionResponseLength = 64 * 1024, // 64 KB
+ kMaxKeySystemLength = 256,
};
} // namespace limits
diff --git a/chromium/media/base/mac/BUILD.gn b/chromium/media/base/mac/BUILD.gn
index c575b7adc9d..c1ebc9e64e7 100644
--- a/chromium/media/base/mac/BUILD.gn
+++ b/chromium/media/base/mac/BUILD.gn
@@ -10,6 +10,8 @@ source_set("mac") {
"coremedia_glue.h",
"coremedia_glue.mm",
"corevideo_glue.h",
+ "video_frame_mac.cc",
+ "video_frame_mac.h",
"videotoolbox_glue.h",
"videotoolbox_glue.mm",
]
@@ -18,6 +20,22 @@ source_set("mac") {
"avfoundation_glue.h",
"avfoundation_glue.mm",
]
+ libs = [
+ # Required by video_frame_mac.cc.
+ "CoreVideo.framework",
+ ]
}
set_sources_assignment_filter(sources_assignment_filter)
+ configs += [ "//media:media_config" ]
+}
+
+source_set("unittests") {
+ testonly = true
+ sources = [
+ "video_frame_mac_unittests.cc",
+ ]
+ configs += [ "//media:media_config" ]
+ deps = [
+ "//testing/gtest",
+ ]
}
diff --git a/chromium/media/base/mac/avfoundation_glue.h b/chromium/media/base/mac/avfoundation_glue.h
index 73a747b0d71..df54ae0aed9 100644
--- a/chromium/media/base/mac/avfoundation_glue.h
+++ b/chromium/media/base/mac/avfoundation_glue.h
@@ -12,7 +12,9 @@
#ifndef MEDIA_BASE_MAC_AVFOUNDATION_GLUE_H_
#define MEDIA_BASE_MAC_AVFOUNDATION_GLUE_H_
+#if defined(__OBJC__)
#import <Foundation/Foundation.h>
+#endif // defined(__OBJC__)
#include "base/basictypes.h"
#include "media/base/mac/coremedia_glue.h"
@@ -20,14 +22,17 @@
class MEDIA_EXPORT AVFoundationGlue {
public:
+ // Must be called on the UI thread prior to attempting to use any other
+ // AVFoundation methods.
+ static void InitializeAVFoundation();
+
// This method returns true if the OS version supports AVFoundation and the
// AVFoundation bundle could be loaded correctly, or false otherwise.
static bool IsAVFoundationSupported();
+#if defined(__OBJC__)
static NSBundle const* AVFoundationBundle();
- static void* AVFoundationLibraryHandle();
-
// Originally coming from AVCaptureDevice.h but in global namespace.
static NSString* AVCaptureDeviceWasConnectedNotification();
static NSString* AVCaptureDeviceWasDisconnectedNotification();
@@ -48,11 +53,14 @@ class MEDIA_EXPORT AVFoundationGlue {
static Class AVCaptureSessionClass();
static Class AVCaptureVideoDataOutputClass();
+#endif // defined(__OBJC__)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AVFoundationGlue);
};
+#if defined(__OBJC__)
+
// Originally AVCaptureDevice and coming from AVCaptureDevice.h
MEDIA_EXPORT
@interface CrAVCaptureDevice : NSObject
@@ -169,4 +177,6 @@ MEDIA_EXPORT
@end
+#endif // defined(__OBJC__)
+
#endif // MEDIA_BASE_MAC_AVFOUNDATION_GLUE_H_
diff --git a/chromium/media/base/mac/avfoundation_glue.mm b/chromium/media/base/mac/avfoundation_glue.mm
index c9985c4e0c6..aed571d5d47 100644
--- a/chromium/media/base/mac/avfoundation_glue.mm
+++ b/chromium/media/base/mac/avfoundation_glue.mm
@@ -9,11 +9,29 @@
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/mac/mac_util.h"
-#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
#include "media/base/media_switches.h"
namespace {
+// Used for logging capture API usage. Classes are a partition. Elements in this
+// enum should not be deleted or rearranged; the only permitted operation is to
+// add new elements before CAPTURE_API_MAX, that must be equal to the last item.
+enum CaptureApi {
+ CAPTURE_API_QTKIT_DUE_TO_OS_PREVIOUS_TO_LION = 0,
+ CAPTURE_API_QTKIT_FORCED_BY_FLAG = 1,
+ CAPTURE_API_QTKIT_DUE_TO_NO_FLAG = 2,
+ CAPTURE_API_QTKIT_DUE_TO_AVFOUNDATION_LOAD_ERROR = 3,
+ CAPTURE_API_AVFOUNDATION_LOADED_OK = 4,
+ CAPTURE_API_MAX = CAPTURE_API_AVFOUNDATION_LOADED_OK
+};
+
+void LogCaptureApi(CaptureApi api) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCaptureApi.Mac",
+ api,
+ CAPTURE_API_MAX + 1);
+}
+
// This class is used to retrieve AVFoundation NSBundle and library handle. It
// must be used as a LazyInstance so that it is initialised once and in a
// thread-safe way. Normally no work is done in constructors: LazyInstance is
@@ -57,7 +75,6 @@ class AVFoundationInternal {
}
NSBundle* bundle() const { return bundle_; }
- void* library_handle() const { return library_handle_; }
NSString* AVCaptureDeviceWasConnectedNotification() const {
return AVCaptureDeviceWasConnectedNotification_;
@@ -100,43 +117,59 @@ class AVFoundationInternal {
DISALLOW_COPY_AND_ASSIGN(AVFoundationInternal);
};
-} // namespace
-
-static base::LazyInstance<AVFoundationInternal> g_avfoundation_handle =
- LAZY_INSTANCE_INITIALIZER;
-
-bool AVFoundationGlue::IsAVFoundationSupported() {
- // DeviceMonitorMac will initialize this static bool from the main UI thread
- // once, during Chrome startup so this construction is thread safe.
- // Use AVFoundation if possible, enabled, and QTKit is not explicitly forced.
- static CommandLine* command_line = CommandLine::ForCurrentProcess();
-
+// This contains the logic of checking whether AVFoundation is supported.
+// It's called only once and the results are cached in a static bool.
+bool LoadAVFoundationInternal() {
// AVFoundation is only available on OS Lion and above.
- if (!base::mac::IsOSLionOrLater())
+ if (!base::mac::IsOSLionOrLater()) {
+ LogCaptureApi(CAPTURE_API_QTKIT_DUE_TO_OS_PREVIOUS_TO_LION);
return false;
+ }
+ const base::CommandLine* command_line =
+ base::CommandLine::ForCurrentProcess();
// The force-qtkit flag takes precedence over enable-avfoundation.
- if (command_line->HasSwitch(switches::kForceQTKit))
+ if (command_line->HasSwitch(switches::kForceQTKit)) {
+ LogCaptureApi(CAPTURE_API_QTKIT_FORCED_BY_FLAG);
+ return false;
+ }
+
+ if (!command_line->HasSwitch(switches::kEnableAVFoundation)) {
+ LogCaptureApi(CAPTURE_API_QTKIT_DUE_TO_NO_FLAG);
return false;
+ }
+ const bool ret = [AVFoundationGlue::AVFoundationBundle() load];
+ LogCaptureApi(ret ? CAPTURE_API_AVFOUNDATION_LOADED_OK
+ : CAPTURE_API_QTKIT_DUE_TO_AVFOUNDATION_LOAD_ERROR);
+ return ret;
+}
+
+} // namespace
+
+static base::LazyInstance<AVFoundationInternal>::Leaky g_avfoundation_handle =
+ LAZY_INSTANCE_INITIALIZER;
- // Next in precedence is the enable-avfoundation flag.
- // TODO(vrk): Does this really need to be static?
- static bool should_enable_avfoundation =
- command_line->HasSwitch(switches::kEnableAVFoundation) ||
- base::FieldTrialList::FindFullName("AVFoundationMacVideoCapture")
- == "Enabled";
- // Try to load AVFoundation. Save result in static bool to avoid loading
- // AVFoundationBundle every call.
- static bool loaded_successfully = [AVFoundationBundle() load];
- return should_enable_avfoundation && loaded_successfully;
+enum {
+ INITIALIZE_NOT_CALLED = 0,
+ AVFOUNDATION_IS_SUPPORTED,
+ AVFOUNDATION_NOT_SUPPORTED
+} static g_avfoundation_initialization = INITIALIZE_NOT_CALLED;
+
+void AVFoundationGlue::InitializeAVFoundation() {
+ CHECK([NSThread isMainThread]);
+ if (g_avfoundation_initialization != INITIALIZE_NOT_CALLED)
+ return;
+ g_avfoundation_initialization = LoadAVFoundationInternal() ?
+ AVFOUNDATION_IS_SUPPORTED : AVFOUNDATION_NOT_SUPPORTED;
}
-NSBundle const* AVFoundationGlue::AVFoundationBundle() {
- return g_avfoundation_handle.Get().bundle();
+bool AVFoundationGlue::IsAVFoundationSupported() {
+ CHECK_NE(g_avfoundation_initialization, INITIALIZE_NOT_CALLED);
+ return g_avfoundation_initialization == AVFOUNDATION_IS_SUPPORTED;
}
-void* AVFoundationGlue::AVFoundationLibraryHandle() {
- return g_avfoundation_handle.Get().library_handle();
+NSBundle const* AVFoundationGlue::AVFoundationBundle() {
+ return g_avfoundation_handle.Get().bundle();
}
NSString* AVFoundationGlue::AVCaptureDeviceWasConnectedNotification() {
diff --git a/chromium/media/base/mac/corevideo_glue.h b/chromium/media/base/mac/corevideo_glue.h
index 3597a10110a..27b4d100e74 100644
--- a/chromium/media/base/mac/corevideo_glue.h
+++ b/chromium/media/base/mac/corevideo_glue.h
@@ -14,6 +14,9 @@
class MEDIA_EXPORT CoreVideoGlue {
public:
// Originally from CVPixelBuffer.h
+ enum {
+ kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange = '420v',
+ };
typedef struct CVPlanarPixelBufferInfo_YCbCrPlanar
CVPlanarPixelBufferInfo_YCbCrPlanar;
struct CVPlanarPixelBufferInfo_YCbCrBiPlanar {
diff --git a/chromium/media/base/mac/video_frame_mac.cc b/chromium/media/base/mac/video_frame_mac.cc
new file mode 100644
index 00000000000..e532ddc7511
--- /dev/null
+++ b/chromium/media/base/mac/video_frame_mac.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mac/video_frame_mac.h"
+
+#include <algorithm>
+
+#include "media/base/mac/corevideo_glue.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+namespace {
+
+// Maximum number of planes supported by this implementation.
+const int kMaxPlanes = 3;
+
+// CVPixelBuffer release callback. See |GetCvPixelBufferRepresentation()|.
+void CvPixelBufferReleaseCallback(void* frame_ref,
+ const void* data,
+ size_t size,
+ size_t num_planes,
+ const void* planes[]) {
+ free(const_cast<void*>(data));
+ reinterpret_cast<const VideoFrame*>(frame_ref)->Release();
+}
+
+} // namespace
+
+MEDIA_EXPORT base::ScopedCFTypeRef<CVPixelBufferRef>
+WrapVideoFrameInCVPixelBuffer(const VideoFrame& frame) {
+ base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer;
+
+ // If the frame is backed by a pixel buffer, just return that buffer.
+ if (frame.cv_pixel_buffer()) {
+ pixel_buffer.reset(frame.cv_pixel_buffer(), base::scoped_policy::RETAIN);
+ return pixel_buffer;
+ }
+
+ // VideoFrame only supports YUV formats and most of them are 'YVU' ordered,
+ // which CVPixelBuffer does not support. This means we effectively can only
+ // represent I420 and NV12 frames. In addition, VideoFrame does not carry
+ // colorimetric information, so this function assumes standard video range
+ // and ITU Rec 709 primaries.
+ VideoFrame::Format video_frame_format = frame.format();
+ OSType cv_format;
+ if (video_frame_format == VideoFrame::Format::I420) {
+ cv_format = kCVPixelFormatType_420YpCbCr8Planar;
+ } else if (video_frame_format == VideoFrame::Format::NV12) {
+ cv_format = CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+ } else {
+ DLOG(ERROR) << " unsupported frame format: " << video_frame_format;
+ return pixel_buffer;
+ }
+
+ int num_planes = VideoFrame::NumPlanes(video_frame_format);
+ DCHECK_LE(num_planes, kMaxPlanes);
+ gfx::Size coded_size = frame.coded_size();
+
+ // TODO(jfroy): Support extended pixels (i.e. padding).
+ if (coded_size != frame.visible_rect().size()) {
+ DLOG(ERROR) << " frame with extended pixels not supported: "
+ << " coded_size: " << coded_size.ToString()
+ << ", visible_rect: " << frame.visible_rect().ToString();
+ return pixel_buffer;
+ }
+
+ // Build arrays for each plane's data pointer, dimensions and byte alignment.
+ void* plane_ptrs[kMaxPlanes];
+ size_t plane_widths[kMaxPlanes];
+ size_t plane_heights[kMaxPlanes];
+ size_t plane_bytes_per_row[kMaxPlanes];
+ for (int plane_i = 0; plane_i < num_planes; ++plane_i) {
+ plane_ptrs[plane_i] = const_cast<uint8*>(frame.data(plane_i));
+ gfx::Size plane_size =
+ VideoFrame::PlaneSize(video_frame_format, plane_i, coded_size);
+ plane_widths[plane_i] = plane_size.width();
+ plane_heights[plane_i] = plane_size.height();
+ plane_bytes_per_row[plane_i] = frame.stride(plane_i);
+ }
+
+ // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the
+ // release callback will not execute. The descriptor is freed in the callback.
+ void* descriptor = calloc(
+ 1,
+ std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar),
+ sizeof(CoreVideoGlue::CVPlanarPixelBufferInfo_YCbCrBiPlanar)));
+
+ // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't
+ // give it a smart pointer to the frame, so instead pass a raw pointer and
+ // increment the frame's reference count manually.
+ CVReturn result = CVPixelBufferCreateWithPlanarBytes(
+ kCFAllocatorDefault, coded_size.width(), coded_size.height(), cv_format,
+ descriptor, 0, num_planes, plane_ptrs, plane_widths, plane_heights,
+ plane_bytes_per_row, &CvPixelBufferReleaseCallback,
+ const_cast<VideoFrame*>(&frame), nullptr, pixel_buffer.InitializeInto());
+ if (result != kCVReturnSuccess) {
+ DLOG(ERROR) << " CVPixelBufferCreateWithPlanarBytes failed: " << result;
+ return base::ScopedCFTypeRef<CVPixelBufferRef>(nullptr);
+ }
+
+ // The CVPixelBuffer now references the data of the frame, so increment its
+ // reference count manually. The release callback set on the pixel buffer will
+ // release the frame.
+ frame.AddRef();
+
+ // Apply required colorimetric attachments.
+ CVBufferSetAttachment(pixel_buffer, kCVImageBufferColorPrimariesKey,
+ kCVImageBufferColorPrimaries_ITU_R_709_2,
+ kCVAttachmentMode_ShouldPropagate);
+ CVBufferSetAttachment(pixel_buffer, kCVImageBufferTransferFunctionKey,
+ kCVImageBufferTransferFunction_ITU_R_709_2,
+ kCVAttachmentMode_ShouldPropagate);
+ CVBufferSetAttachment(pixel_buffer, kCVImageBufferYCbCrMatrixKey,
+ kCVImageBufferYCbCrMatrix_ITU_R_709_2,
+ kCVAttachmentMode_ShouldPropagate);
+
+ return pixel_buffer;
+}
+
+} // namespace media
diff --git a/chromium/media/base/mac/video_frame_mac.h b/chromium/media/base/mac/video_frame_mac.h
new file mode 100644
index 00000000000..93bcb77b362
--- /dev/null
+++ b/chromium/media/base/mac/video_frame_mac.h
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MAC_VIDEO_FRAME_MAC_H_
+#define MEDIA_BASE_MAC_VIDEO_FRAME_MAC_H_
+
+#include <CoreVideo/CVPixelBuffer.h>
+
+#include "base/mac/scoped_cftyperef.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class VideoFrame;
+
+// Wrap a VideoFrame's data in a CVPixelBuffer object. The frame's lifetime is
+// extended for the duration of the pixel buffer's lifetime. If the frame's data
+// is already managed by a CVPixelBuffer (the frame was created using
+// |WrapCVPixelBuffer()|, then the underlying CVPixelBuffer is returned.
+//
+// The only supported formats are I420 and NV12. Frames with extended pixels
+// (the visible rect's size does not match the coded size) are not supported.
+// If an unsupported frame is specified, null is returned.
+MEDIA_EXPORT base::ScopedCFTypeRef<CVPixelBufferRef>
+WrapVideoFrameInCVPixelBuffer(const VideoFrame& frame);
+
+} // namespace media
+
+#endif // MEDIA_BASE_MAC_VIDEO_FRAME_MAC_H_
diff --git a/chromium/media/base/mac/video_frame_mac_unittests.cc b/chromium/media/base/mac/video_frame_mac_unittests.cc
new file mode 100644
index 00000000000..96c3bc31a0c
--- /dev/null
+++ b/chromium/media/base/mac/video_frame_mac_unittests.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mac/video_frame_mac.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "media/base/mac/corevideo_glue.h"
+#include "media/base/video_frame.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+const int kWidth = 64;
+const int kHeight = 48;
+const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+
+struct FormatPair {
+ VideoFrame::Format chrome;
+ OSType corevideo;
+};
+
+void Increment(int* i) {
+ ++(*i);
+}
+
+} // namespace
+
+TEST(VideoFrameMac, CheckBasicAttributes) {
+ gfx::Size size(kWidth, kHeight);
+ auto frame = VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+
+ auto pb = WrapVideoFrameInCVPixelBuffer(*frame);
+ ASSERT_TRUE(pb.get());
+
+ gfx::Size coded_size = frame->coded_size();
+ VideoFrame::Format format = frame->format();
+
+ EXPECT_EQ(coded_size.width(), static_cast<int>(CVPixelBufferGetWidth(pb)));
+ EXPECT_EQ(coded_size.height(), static_cast<int>(CVPixelBufferGetHeight(pb)));
+ EXPECT_EQ(VideoFrame::NumPlanes(format), CVPixelBufferGetPlaneCount(pb));
+
+ CVPixelBufferLockBaseAddress(pb, 0);
+ for (size_t i = 0; i < VideoFrame::NumPlanes(format); ++i) {
+ gfx::Size plane_size = VideoFrame::PlaneSize(format, i, coded_size);
+ EXPECT_EQ(plane_size.width(),
+ static_cast<int>(CVPixelBufferGetWidthOfPlane(pb, i)));
+ EXPECT_EQ(plane_size.height(),
+ static_cast<int>(CVPixelBufferGetHeightOfPlane(pb, i)));
+ EXPECT_EQ(frame->data(i), CVPixelBufferGetBaseAddressOfPlane(pb, i));
+ }
+ CVPixelBufferUnlockBaseAddress(pb, 0);
+}
+
+TEST(VideoFrameMac, CheckFormats) {
+ const FormatPair format_pairs[] = {
+ {VideoFrame::I420, kCVPixelFormatType_420YpCbCr8Planar},
+
+ {VideoFrame::YV12, 0},
+ {VideoFrame::YV16, 0},
+ {VideoFrame::YV12A, 0},
+ {VideoFrame::YV12J, 0},
+ {VideoFrame::YV24, 0},
+ };
+
+ gfx::Size size(kWidth, kHeight);
+ for (const auto& format_pair : format_pairs) {
+ auto frame = VideoFrame::CreateFrame(format_pair.chrome, size,
+ gfx::Rect(size), size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+ auto pb = WrapVideoFrameInCVPixelBuffer(*frame);
+ if (format_pair.corevideo) {
+ EXPECT_EQ(format_pair.corevideo, CVPixelBufferGetPixelFormatType(pb));
+ } else {
+ EXPECT_EQ(nullptr, pb.get());
+ }
+ }
+}
+
+TEST(VideoFrameMac, CheckLifetime) {
+ gfx::Size size(kWidth, kHeight);
+ auto frame = VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+
+ int instances_destroyed = 0;
+ auto wrapper_frame = VideoFrame::WrapVideoFrame(
+ frame, frame->visible_rect(), frame->natural_size(),
+ base::Bind(&Increment, &instances_destroyed));
+ ASSERT_TRUE(wrapper_frame.get());
+
+ auto pb = WrapVideoFrameInCVPixelBuffer(*wrapper_frame);
+ ASSERT_TRUE(pb.get());
+
+ wrapper_frame = nullptr;
+ EXPECT_EQ(0, instances_destroyed);
+ pb.reset();
+ EXPECT_EQ(1, instances_destroyed);
+}
+
+TEST(VideoFrameMac, CheckWrapperFrame) {
+ const FormatPair format_pairs[] = {
+ {VideoFrame::I420, kCVPixelFormatType_420YpCbCr8Planar},
+ {VideoFrame::NV12,
+ CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange},
+ };
+
+ gfx::Size size(kWidth, kHeight);
+ for (const auto& format_pair : format_pairs) {
+ base::ScopedCFTypeRef<CVPixelBufferRef> pb;
+ CVPixelBufferCreate(nullptr, kWidth, kHeight, format_pair.corevideo,
+ nullptr, pb.InitializeInto());
+ ASSERT_TRUE(pb.get());
+
+ auto frame = VideoFrame::WrapCVPixelBuffer(pb.get(), kTimestamp);
+ ASSERT_TRUE(frame.get());
+ EXPECT_EQ(pb.get(), frame->cv_pixel_buffer());
+ EXPECT_EQ(format_pair.chrome, frame->format());
+
+ frame = nullptr;
+ EXPECT_EQ(1, CFGetRetainCount(pb.get()));
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/mac/videotoolbox_glue.h b/chromium/media/base/mac/videotoolbox_glue.h
index cb2748c39a3..69dfd6ac511 100644
--- a/chromium/media/base/mac/videotoolbox_glue.h
+++ b/chromium/media/base/mac/videotoolbox_glue.h
@@ -48,6 +48,7 @@ class MEDIA_EXPORT VideoToolboxGlue {
CFStringRef kVTCompressionPropertyKey_AverageBitRate() const;
CFStringRef kVTCompressionPropertyKey_ColorPrimaries() const;
CFStringRef kVTCompressionPropertyKey_ExpectedFrameRate() const;
+ CFStringRef kVTCompressionPropertyKey_MaxFrameDelayCount() const;
CFStringRef kVTCompressionPropertyKey_MaxKeyFrameInterval() const;
CFStringRef kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration() const;
CFStringRef kVTCompressionPropertyKey_ProfileLevel() const;
@@ -89,6 +90,9 @@ class MEDIA_EXPORT VideoToolboxGlue {
CVPixelBufferPoolRef VTCompressionSessionGetPixelBufferPool(
VTCompressionSessionRef session) const;
void VTCompressionSessionInvalidate(VTCompressionSessionRef session) const;
+ OSStatus VTCompressionSessionCompleteFrames(
+ VTCompressionSessionRef session,
+ CoreMediaGlue::CMTime completeUntilPresentationTimeStamp) const;
// Originally from VTSession.h
OSStatus VTSessionSetProperty(VTSessionRef session,
diff --git a/chromium/media/base/mac/videotoolbox_glue.mm b/chromium/media/base/mac/videotoolbox_glue.mm
index 7cffc46a791..18a4f0017b4 100644
--- a/chromium/media/base/mac/videotoolbox_glue.mm
+++ b/chromium/media/base/mac/videotoolbox_glue.mm
@@ -34,6 +34,9 @@ struct VideoToolboxGlue::Library {
typedef CVPixelBufferPoolRef (*VTCompressionSessionGetPixelBufferPoolMethod)(
VTCompressionSessionRef);
typedef void (*VTCompressionSessionInvalidateMethod)(VTCompressionSessionRef);
+ typedef OSStatus (*VTCompressionSessionCompleteFramesMethod)(
+ VTCompressionSessionRef,
+ CoreMediaGlue::CMTime);
typedef OSStatus (*VTSessionSetPropertyMethod)(VTSessionRef,
CFStringRef,
CFTypeRef);
@@ -43,12 +46,14 @@ struct VideoToolboxGlue::Library {
VTCompressionSessionGetPixelBufferPoolMethod
VTCompressionSessionGetPixelBufferPool;
VTCompressionSessionInvalidateMethod VTCompressionSessionInvalidate;
+ VTCompressionSessionCompleteFramesMethod VTCompressionSessionCompleteFrames;
VTSessionSetPropertyMethod VTSessionSetProperty;
CFStringRef* kVTCompressionPropertyKey_AllowFrameReordering;
CFStringRef* kVTCompressionPropertyKey_AverageBitRate;
CFStringRef* kVTCompressionPropertyKey_ColorPrimaries;
CFStringRef* kVTCompressionPropertyKey_ExpectedFrameRate;
+ CFStringRef* kVTCompressionPropertyKey_MaxFrameDelayCount;
CFStringRef* kVTCompressionPropertyKey_MaxKeyFrameInterval;
CFStringRef* kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration;
CFStringRef* kVTCompressionPropertyKey_ProfileLevel;
@@ -86,12 +91,14 @@ class VideoToolboxGlue::Loader {
LOAD_SYMBOL(VTCompressionSessionEncodeFrame)
LOAD_SYMBOL(VTCompressionSessionGetPixelBufferPool)
LOAD_SYMBOL(VTCompressionSessionInvalidate)
+ LOAD_SYMBOL(VTCompressionSessionCompleteFrames)
LOAD_SYMBOL(VTSessionSetProperty)
LOAD_SYMBOL(kVTCompressionPropertyKey_AllowFrameReordering)
LOAD_SYMBOL(kVTCompressionPropertyKey_AverageBitRate)
LOAD_SYMBOL(kVTCompressionPropertyKey_ColorPrimaries)
LOAD_SYMBOL(kVTCompressionPropertyKey_ExpectedFrameRate)
+ LOAD_SYMBOL(kVTCompressionPropertyKey_MaxFrameDelayCount)
LOAD_SYMBOL(kVTCompressionPropertyKey_MaxKeyFrameInterval)
LOAD_SYMBOL(kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration)
LOAD_SYMBOL(kVTCompressionPropertyKey_ProfileLevel)
@@ -189,6 +196,13 @@ void VideoToolboxGlue::VTCompressionSessionInvalidate(
library_->VTCompressionSessionInvalidate(session);
}
+OSStatus VideoToolboxGlue::VTCompressionSessionCompleteFrames(
+ VTCompressionSessionRef session,
+ CoreMediaGlue::CMTime completeUntilPresentationTimeStamp) const {
+ return library_->VTCompressionSessionCompleteFrames(
+ session, completeUntilPresentationTimeStamp);
+}
+
OSStatus VideoToolboxGlue::VTSessionSetProperty(VTSessionRef session,
CFStringRef propertyKey,
CFTypeRef propertyValue) const {
@@ -202,6 +216,7 @@ KEY_ACCESSOR(kVTCompressionPropertyKey_AllowFrameReordering)
KEY_ACCESSOR(kVTCompressionPropertyKey_AverageBitRate)
KEY_ACCESSOR(kVTCompressionPropertyKey_ColorPrimaries)
KEY_ACCESSOR(kVTCompressionPropertyKey_ExpectedFrameRate)
+KEY_ACCESSOR(kVTCompressionPropertyKey_MaxFrameDelayCount)
KEY_ACCESSOR(kVTCompressionPropertyKey_MaxKeyFrameInterval)
KEY_ACCESSOR(kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration)
KEY_ACCESSOR(kVTCompressionPropertyKey_ProfileLevel)
diff --git a/chromium/media/base/media_client.cc b/chromium/media/base/media_client.cc
new file mode 100644
index 00000000000..bcc6de0bffe
--- /dev/null
+++ b/chromium/media/base/media_client.cc
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_client.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+static MediaClient* g_media_client = nullptr;
+
+void SetMediaClient(MediaClient* media_client) {
+ g_media_client = media_client;
+}
+
+MediaClient* GetMediaClient() {
+ return g_media_client;
+}
+
+KeySystemInfoForUMA::KeySystemInfoForUMA(
+ const std::string& key_system,
+ const std::string& key_system_name_for_uma,
+ bool reports_key_system_support_to_uma)
+ : key_system(key_system),
+ key_system_name_for_uma(key_system_name_for_uma),
+ reports_key_system_support_to_uma(reports_key_system_support_to_uma) {
+}
+
+KeySystemInfoForUMA::~KeySystemInfoForUMA() {
+}
+
+MediaClient::MediaClient() {
+}
+
+MediaClient::~MediaClient() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/media_client.h b/chromium/media/base/media_client.h
new file mode 100644
index 00000000000..f6af05395c3
--- /dev/null
+++ b/chromium/media/base/media_client.h
@@ -0,0 +1,72 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_CLIENT_H_
+#define MEDIA_BASE_MEDIA_CLIENT_H_
+
+#include <string>
+#include <vector>
+
+#include "media/base/key_system_info.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MediaClient;
+
+// Setter for the client. If a customized client is needed, it should be set
+// early, before the client could possibly be used.
+MEDIA_EXPORT void SetMediaClient(MediaClient* media_client);
+
+// Media's embedder API should only be used by media.
+#if defined(MEDIA_IMPLEMENTATION)
+// Getter for the client. Returns NULL if no customized client is needed.
+MediaClient* GetMediaClient();
+#endif
+
+struct MEDIA_EXPORT KeySystemInfoForUMA {
+ KeySystemInfoForUMA(const std::string& key_system,
+ const std::string& key_system_name_for_uma,
+ bool reports_key_system_support_to_uma);
+ ~KeySystemInfoForUMA();
+
+ // Concrete key system name;
+ std::string key_system;
+
+ // Display name for UMA reporting. For example, the display name for
+ // "org.w3.clearkey" is "ClearKey". When providing this value, make sure to
+ // update tools/metrics/histograms/histograms.xml.
+ std::string key_system_name_for_uma;
+
+ // Whether query/support statistics for |key_system| should be reported.
+ // If set to true, make sure to add a new Media.EME.KeySystemSupport.* to
+ // tools/metrics/histograms/histograms.xml. See KeySystemsSupportUMA for
+ // details on how key system query/support UMA is reported.
+ bool reports_key_system_support_to_uma;
+};
+
+// A client interface for embedders (e.g. content/renderer) to provide
+// customized service.
+class MEDIA_EXPORT MediaClient {
+ public:
+ MediaClient();
+ virtual ~MediaClient();
+
+ // Provides UMA info for key systems that SHOULD be reported to UMA, no matter
+ // whether a key system is actually supported by this client or not. Only
+ // called once per instance.
+ virtual void AddKeySystemsInfoForUMA(
+ std::vector<KeySystemInfoForUMA>* key_systems_info_for_uma) = 0;
+
+ // Returns whether client key systems info should be updated.
+ virtual bool IsKeySystemsUpdateNeeded() = 0;
+
+ // Adds info for supported key systems.
+ virtual void AddSupportedKeySystems(
+ std::vector<KeySystemInfo>* key_systems_info) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_CLIENT_H_
diff --git a/chromium/media/base/media_keys.cc b/chromium/media/base/media_keys.cc
index a8e94818f71..cf5256bf233 100644
--- a/chromium/media/base/media_keys.cc
+++ b/chromium/media/base/media_keys.cc
@@ -10,8 +10,4 @@ MediaKeys::MediaKeys() {}
MediaKeys::~MediaKeys() {}
-Decryptor* MediaKeys::GetDecryptor() {
- return NULL;
-}
-
} // namespace media
diff --git a/chromium/media/base/media_keys.h b/chromium/media/base/media_keys.h
index 022f6bcce29..ba2a680d39f 100644
--- a/chromium/media/base/media_keys.h
+++ b/chromium/media/base/media_keys.h
@@ -11,6 +11,8 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "media/base/eme_constants.h"
#include "media/base/media_export.h"
#include "url/gurl.h"
@@ -20,21 +22,21 @@ class Time;
namespace media {
-class Decryptor;
+class CdmContext;
+struct CdmKeyInformation;
template <typename... T>
class CdmPromiseTemplate;
typedef CdmPromiseTemplate<std::string> NewSessionCdmPromise;
typedef CdmPromiseTemplate<> SimpleCdmPromise;
-typedef std::vector<std::vector<uint8> > KeyIdsVector;
-typedef CdmPromiseTemplate<KeyIdsVector> KeyIdsPromise;
+typedef ScopedVector<CdmKeyInformation> CdmKeysInfo;
// Performs media key operations.
//
// All key operations are called on the renderer thread. Therefore, these calls
// should be fast and nonblocking; key events should be fired asynchronously.
-class MEDIA_EXPORT MediaKeys {
+class MEDIA_EXPORT MediaKeys{
public:
// Reported to UMA, so never reuse a value!
// Must be kept in sync with blink::WebMediaPlayerClient::MediaKeyErrorCode
@@ -60,77 +62,82 @@ class MEDIA_EXPORT MediaKeys {
QUOTA_EXCEEDED_ERROR,
UNKNOWN_ERROR,
CLIENT_ERROR,
- OUTPUT_ERROR
+ OUTPUT_ERROR,
+ EXCEPTION_MAX = OUTPUT_ERROR
};
// Type of license required when creating/loading a session.
// Must be consistent with the values specified in the spec:
- // https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#extensions
+ // https://w3c.github.io/encrypted-media/#idl-def-MediaKeySessionType
enum SessionType {
TEMPORARY_SESSION,
- PERSISTENT_SESSION
+ PERSISTENT_LICENSE_SESSION,
+ PERSISTENT_RELEASE_MESSAGE_SESSION
};
- static const uint32 kInvalidSessionId = 0;
-#if defined(ENABLE_BROWSER_CDMS)
- static const int kInvalidCdmId = 0;
-#endif
+ // Type of message being sent to the application.
+ // Must be consistent with the values specified in the spec:
+ // https://w3c.github.io/encrypted-media/#idl-def-MediaKeyMessageType
+ enum MessageType {
+ LICENSE_REQUEST,
+ LICENSE_RENEWAL,
+ LICENSE_RELEASE,
+ MESSAGE_TYPE_MAX = LICENSE_RELEASE
+ };
- MediaKeys();
virtual ~MediaKeys();
// Provides a server certificate to be used to encrypt messages to the
// license server.
- virtual void SetServerCertificate(const uint8* certificate_data,
- int certificate_data_length,
+ virtual void SetServerCertificate(const std::vector<uint8_t>& certificate,
scoped_ptr<SimpleCdmPromise> promise) = 0;
- // Creates a session with the |init_data_type|, |init_data| and |session_type|
- // provided.
- // Note: UpdateSession() and ReleaseSession() should only be called after
- // |promise| is resolved.
- virtual void CreateSession(const std::string& init_data_type,
- const uint8* init_data,
- int init_data_length,
- SessionType session_type,
- scoped_ptr<NewSessionCdmPromise> promise) = 0;
-
- // Loads a session with the |web_session_id| provided.
- // Note: UpdateSession() and ReleaseSession() should only be called after
- // |promise| is resolved.
- virtual void LoadSession(const std::string& web_session_id,
+ // Creates a session with |session_type|. Then generates a request with the
+ // |init_data_type| and |init_data|.
+ // Note:
+ // 1. The session ID will be provided when the |promise| is resolved.
+ // 2. The generated request should be returned through a SessionMessageCB,
+ // which must be AFTER the |promise| is resolved. Otherwise, the session ID
+ // in the callback will not be recognized.
+ // 3. UpdateSession(), CloseSession() and RemoveSession() should only be
+ // called after the |promise| is resolved.
+ virtual void CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise) = 0;
+
+ // Loads a session with the |session_id| provided.
+ // Note: UpdateSession(), CloseSession() and RemoveSession() should only be
+ // called after the |promise| is resolved.
+ virtual void LoadSession(SessionType session_type,
+ const std::string& session_id,
scoped_ptr<NewSessionCdmPromise> promise) = 0;
- // Updates a session specified by |web_session_id| with |response|.
- virtual void UpdateSession(const std::string& web_session_id,
- const uint8* response,
- int response_length,
+ // Updates a session specified by |session_id| with |response|.
+ virtual void UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
scoped_ptr<SimpleCdmPromise> promise) = 0;
- // Closes the session specified by |web_session_id|.
- virtual void CloseSession(const std::string& web_session_id,
+ // Closes the session specified by |session_id|. The CDM should resolve or
+ // reject the |promise| when the call has been processed. This may be before
+ // the session is closed. Once the session is closed, a SessionClosedCB must
+ // also be called.
+ virtual void CloseSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) = 0;
// Removes stored session data associated with the session specified by
- // |web_session_id|.
- virtual void RemoveSession(const std::string& web_session_id,
+ // |session_id|.
+ virtual void RemoveSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) = 0;
- // Retrieves the key IDs for keys in the session that the CDM knows are
- // currently usable to decrypt media data.
- virtual void GetUsableKeyIds(const std::string& web_session_id,
- scoped_ptr<KeyIdsPromise> promise) = 0;
+ // Returns the CdmContext associated with |this|, which must NOT be null.
+ // Usually the CdmContext is owned by |this|. Caller needs to make sure it is
+ // not used after |this| is destructed.
+ virtual CdmContext* GetCdmContext() = 0;
- // Gets the Decryptor object associated with the MediaKeys. Returns NULL if
- // no Decryptor object is associated. The returned object is only guaranteed
- // to be valid during the MediaKeys' lifetime.
- virtual Decryptor* GetDecryptor();
-
-#if defined(ENABLE_BROWSER_CDMS)
- // Returns the CDM ID associated with |this|. May be kInvalidCdmId if no CDM
- // ID is associated.
- virtual int GetCdmId() const = 0;
-#endif
+ protected:
+ MediaKeys();
private:
DISALLOW_COPY_AND_ASSIGN(MediaKeys);
@@ -138,24 +145,30 @@ class MEDIA_EXPORT MediaKeys {
// Key event callbacks. See the spec for details:
// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#event-summary
-typedef base::Callback<void(const std::string& web_session_id,
- const std::vector<uint8>& message,
- const GURL& destination_url)> SessionMessageCB;
-
-typedef base::Callback<void(const std::string& web_session_id)> SessionReadyCB;
-
-typedef base::Callback<void(const std::string& web_session_id)> SessionClosedCB;
-
-typedef base::Callback<void(const std::string& web_session_id,
- MediaKeys::Exception exception_code,
- uint32 system_code,
- const std::string& error_message)> SessionErrorCB;
-
-typedef base::Callback<void(const std::string& web_session_id,
- bool has_additional_usable_key)>
- SessionKeysChangeCB;
-typedef base::Callback<void(const std::string& web_session_id,
+typedef base::Callback<void(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8_t>& message,
+ const GURL& legacy_destination_url)>
+ SessionMessageCB;
+
+// Called when the session specified by |session_id| is closed. Note that the
+// CDM may close a session at any point, such as in response to a CloseSession()
+// call, when the session is no longer needed, or when system resources are
+// lost. See for details: http://w3c.github.io/encrypted-media/#session-close
+typedef base::Callback<void(const std::string& session_id)> SessionClosedCB;
+
+typedef base::Callback<void(const std::string& session_id,
+ MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message)>
+ LegacySessionErrorCB;
+
+typedef base::Callback<void(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info)> SessionKeysChangeCB;
+
+typedef base::Callback<void(const std::string& session_id,
const base::Time& new_expiry_time)>
SessionExpirationUpdateCB;
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index b172bfbce91..c89c4151fb6 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -4,10 +4,9 @@
#include "media/base/media_log.h"
-#include <string>
#include "base/atomic_sequence_num.h"
-#include "base/logging.h"
+#include "base/json/json_writer.h"
#include "base/values.h"
namespace media {
@@ -16,7 +15,33 @@ namespace media {
// unique IDs.
static base::StaticAtomicSequenceNumber g_media_log_count;
-const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
+std::string MediaLog::MediaLogLevelToString(MediaLogLevel level) {
+ switch (level) {
+ case MEDIALOG_ERROR:
+ return "error";
+ case MEDIALOG_INFO:
+ return "info";
+ case MEDIALOG_DEBUG:
+ return "debug";
+ }
+ NOTREACHED();
+ return NULL;
+}
+
+MediaLogEvent::Type MediaLog::MediaLogLevelToEventType(MediaLogLevel level) {
+ switch (level) {
+ case MEDIALOG_ERROR:
+ return MediaLogEvent::MEDIA_ERROR_LOG_ENTRY;
+ case MEDIALOG_INFO:
+ return MediaLogEvent::MEDIA_INFO_LOG_ENTRY;
+ case MEDIALOG_DEBUG:
+ return MediaLogEvent::MEDIA_DEBUG_LOG_ENTRY;
+ }
+ NOTREACHED();
+ return MediaLogEvent::MEDIA_ERROR_LOG_ENTRY;
+}
+
+std::string MediaLog::EventTypeToString(MediaLogEvent::Type type) {
switch (type) {
case MediaLogEvent::WEBMEDIAPLAYER_CREATED:
return "WEBMEDIAPLAYER_CREATED";
@@ -52,8 +77,12 @@ const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return "TEXT_ENDED";
case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
return "BUFFERED_EXTENTS_CHANGED";
- case MediaLogEvent::MEDIA_SOURCE_ERROR:
- return "MEDIA_SOURCE_ERROR";
+ case MediaLogEvent::MEDIA_ERROR_LOG_ENTRY:
+ return "MEDIA_ERROR_LOG_ENTRY";
+ case MediaLogEvent::MEDIA_INFO_LOG_ENTRY:
+ return "MEDIA_INFO_LOG_ENTRY";
+ case MediaLogEvent::MEDIA_DEBUG_LOG_ENTRY:
+ return "MEDIA_DEBUG_LOG_ENTRY";
case MediaLogEvent::PROPERTY_CHANGE:
return "PROPERTY_CHANGE";
}
@@ -61,7 +90,7 @@ const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return NULL;
}
-const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
+std::string MediaLog::PipelineStatusToString(PipelineStatus status) {
switch (status) {
case PIPELINE_OK:
return "pipeline: ok";
@@ -88,7 +117,7 @@ const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
case DEMUXER_ERROR_COULD_NOT_OPEN:
return "demuxer: could not open";
case DEMUXER_ERROR_COULD_NOT_PARSE:
- return "dumuxer: could not parse";
+ return "demuxer: could not parse";
case DEMUXER_ERROR_NO_SUPPORTED_STREAMS:
return "demuxer: no supported streams";
case DECODER_ERROR_NOT_SUPPORTED:
@@ -98,12 +127,20 @@ const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
return NULL;
}
-LogHelper::LogHelper(const LogCB& log_cb) : log_cb_(log_cb) {}
-
-LogHelper::~LogHelper() {
- if (log_cb_.is_null())
- return;
- log_cb_.Run(stream_.str());
+std::string MediaLog::MediaEventToLogString(const MediaLogEvent& event) {
+ // Special case for PIPELINE_ERROR, since that's by far the most useful
+ // event for figuring out media pipeline failures, and just reporting
+ // pipeline status as numeric code is not very helpful/user-friendly.
+ int error_code = 0;
+ if (event.type == MediaLogEvent::PIPELINE_ERROR &&
+ event.params.GetInteger("pipeline_error", &error_code)) {
+ PipelineStatus status = static_cast<PipelineStatus>(error_code);
+ return EventTypeToString(event.type) + " " +
+ media::MediaLog::PipelineStatusToString(status);
+ }
+ std::string params_json;
+ base::JSONWriter::Write(&event.params, &params_json);
+ return EventTypeToString(event.type) + " " + params_json;
}
MediaLog::MediaLog() : id_(g_media_log_count.GetNext()) {}
@@ -121,21 +158,27 @@ scoped_ptr<MediaLogEvent> MediaLog::CreateEvent(MediaLogEvent::Type type) {
}
scoped_ptr<MediaLogEvent> MediaLog::CreateBooleanEvent(
- MediaLogEvent::Type type, const char* property, bool value) {
+ MediaLogEvent::Type type,
+ const std::string& property,
+ bool value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(type));
event->params.SetBoolean(property, value);
return event.Pass();
}
scoped_ptr<MediaLogEvent> MediaLog::CreateStringEvent(
- MediaLogEvent::Type type, const char* property, const std::string& value) {
+ MediaLogEvent::Type type,
+ const std::string& property,
+ const std::string& value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(type));
event->params.SetString(property, value);
return event.Pass();
}
scoped_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
- MediaLogEvent::Type type, const char* property, base::TimeDelta value) {
+ MediaLogEvent::Type type,
+ const std::string& property,
+ base::TimeDelta value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(type));
if (value.is_max())
event->params.SetString(property, "unknown");
@@ -167,7 +210,7 @@ scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineStateChangedEvent(
scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineErrorEvent(
PipelineStatus error) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PIPELINE_ERROR));
- event->params.SetString("pipeline_error", PipelineStatusToString(error));
+ event->params.SetInteger("pipeline_error", error);
return event.Pass();
}
@@ -191,44 +234,42 @@ scoped_ptr<MediaLogEvent> MediaLog::CreateBufferedExtentsChangedEvent(
return event.Pass();
}
-scoped_ptr<MediaLogEvent> MediaLog::CreateMediaSourceErrorEvent(
- const std::string& error) {
- scoped_ptr<MediaLogEvent> event(
- CreateEvent(MediaLogEvent::MEDIA_SOURCE_ERROR));
- event->params.SetString("error", error);
- return event.Pass();
+void MediaLog::AddLogEvent(MediaLogLevel level, const std::string& message) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogLevelToEventType(level)));
+ event->params.SetString(MediaLogLevelToString(level), message);
+ AddEvent(event.Pass());
}
void MediaLog::SetStringProperty(
- const char* key, const std::string& value) {
+ const std::string& key, const std::string& value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
event->params.SetString(key, value);
AddEvent(event.Pass());
}
void MediaLog::SetIntegerProperty(
- const char* key, int value) {
+ const std::string& key, int value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
event->params.SetInteger(key, value);
AddEvent(event.Pass());
}
void MediaLog::SetDoubleProperty(
- const char* key, double value) {
+ const std::string& key, double value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
event->params.SetDouble(key, value);
AddEvent(event.Pass());
}
void MediaLog::SetBooleanProperty(
- const char* key, bool value) {
+ const std::string& key, bool value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
event->params.SetBoolean(key, value);
AddEvent(event.Pass());
}
void MediaLog::SetTimeProperty(
- const char* key, base::TimeDelta value) {
+ const std::string& key, base::TimeDelta value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
if (value.is_max())
event->params.SetString(key, "unknown");
@@ -237,4 +278,14 @@ void MediaLog::SetTimeProperty(
AddEvent(event.Pass());
}
+LogHelper::LogHelper(MediaLog::MediaLogLevel level, const LogCB& log_cb)
+ : level_(level), log_cb_(log_cb) {
+}
+
+LogHelper::~LogHelper() {
+ if (log_cb_.is_null())
+ return;
+ log_cb_.Run(level_, stream_.str());
+}
+
} //namespace media
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
index f342ee84fc4..8b40b5d4e61 100644
--- a/chromium/media/base/media_log.h
+++ b/chromium/media/base/media_log.h
@@ -8,6 +8,7 @@
#include <sstream>
#include <string>
+#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
@@ -17,30 +18,22 @@
namespace media {
-// Indicates a string should be added to the log.
-// First parameter - The string to add to the log.
-typedef base::Callback<void(const std::string&)> LogCB;
-
-// Helper class to make it easier to use log_cb like DVLOG().
-class LogHelper {
- public:
- LogHelper(const LogCB& Log_cb);
- ~LogHelper();
-
- std::ostream& stream() { return stream_; }
-
- private:
- LogCB log_cb_;
- std::stringstream stream_;
-};
-
-#define MEDIA_LOG(log_cb) LogHelper(log_cb).stream()
class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
public:
+ enum MediaLogLevel {
+ MEDIALOG_ERROR,
+ MEDIALOG_INFO,
+ MEDIALOG_DEBUG,
+ };
+
// Convert various enums to strings.
- static const char* EventTypeToString(MediaLogEvent::Type type);
- static const char* PipelineStatusToString(PipelineStatus);
+ static std::string MediaLogLevelToString(MediaLogLevel level);
+ static MediaLogEvent::Type MediaLogLevelToEventType(MediaLogLevel level);
+ static std::string EventTypeToString(MediaLogEvent::Type type);
+ static std::string PipelineStatusToString(PipelineStatus status);
+
+ static std::string MediaEventToLogString(const MediaLogEvent& event);
MediaLog();
@@ -51,11 +44,13 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
// Helper methods to create events and their parameters.
scoped_ptr<MediaLogEvent> CreateEvent(MediaLogEvent::Type type);
scoped_ptr<MediaLogEvent> CreateBooleanEvent(
- MediaLogEvent::Type type, const char* property, bool value);
- scoped_ptr<MediaLogEvent> CreateStringEvent(
- MediaLogEvent::Type type, const char* property, const std::string& value);
- scoped_ptr<MediaLogEvent> CreateTimeEvent(
- MediaLogEvent::Type type, const char* property, base::TimeDelta value);
+ MediaLogEvent::Type type, const std::string& property, bool value);
+ scoped_ptr<MediaLogEvent> CreateStringEvent(MediaLogEvent::Type type,
+ const std::string& property,
+ const std::string& value);
+ scoped_ptr<MediaLogEvent> CreateTimeEvent(MediaLogEvent::Type type,
+ const std::string& property,
+ base::TimeDelta value);
scoped_ptr<MediaLogEvent> CreateLoadEvent(const std::string& url);
scoped_ptr<MediaLogEvent> CreateSeekEvent(float seconds);
scoped_ptr<MediaLogEvent> CreatePipelineStateChangedEvent(
@@ -65,15 +60,16 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
size_t width, size_t height);
scoped_ptr<MediaLogEvent> CreateBufferedExtentsChangedEvent(
int64 start, int64 current, int64 end);
- scoped_ptr<MediaLogEvent> CreateMediaSourceErrorEvent(
- const std::string& error);
+
+ // Report a log message at the specified log level.
+ void AddLogEvent(MediaLogLevel level, const std::string& message);
// Report a property change without an accompanying event.
- void SetStringProperty(const char* key, const std::string& value);
- void SetIntegerProperty(const char* key, int value);
- void SetDoubleProperty(const char* key, double value);
- void SetBooleanProperty(const char* key, bool value);
- void SetTimeProperty(const char* key, base::TimeDelta value);
+ void SetStringProperty(const std::string& key, const std::string& value);
+ void SetIntegerProperty(const std::string& key, int value);
+ void SetDoubleProperty(const std::string& key, double value);
+ void SetBooleanProperty(const std::string& key, bool value);
+ void SetTimeProperty(const std::string& key, base::TimeDelta value);
protected:
friend class base::RefCountedThreadSafe<MediaLog>;
@@ -86,6 +82,35 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
DISALLOW_COPY_AND_ASSIGN(MediaLog);
};
+// Indicates a string should be added to the log.
+// First parameter - The log level for the string.
+// Second parameter - The string to add to the log.
+typedef base::Callback<void(MediaLog::MediaLogLevel, const std::string&)> LogCB;
+
+// Helper class to make it easier to use log_cb like DVLOG().
+class LogHelper {
+ public:
+ LogHelper(MediaLog::MediaLogLevel level, const LogCB& log_cb);
+ ~LogHelper();
+
+ std::ostream& stream() { return stream_; }
+
+ private:
+ MediaLog::MediaLogLevel level_;
+ LogCB log_cb_;
+ std::stringstream stream_;
+};
+
+// Provides a stringstream to collect a log entry to pass to the provided
+// LogCB at the requested level.
+#define MEDIA_LOG(level, log_cb) \
+ LogHelper((MediaLog::MEDIALOG_##level), (log_cb)).stream()
+
+// Logs only while count < max. Increments count for each log. Use LAZY_STREAM
+// to avoid wasteful evaluation of subsequent stream arguments.
+#define LIMITED_MEDIA_LOG(level, log_cb, count, max) \
+ LAZY_STREAM(MEDIA_LOG(level, log_cb), (count) < (max) && ((count)++ || true))
+
} // namespace media
#endif // MEDIA_BASE_MEDIA_LOG_H_
diff --git a/chromium/media/base/media_log_event.h b/chromium/media/base/media_log_event.h
index ee21d001ab5..db687fbb176 100644
--- a/chromium/media/base/media_log_event.h
+++ b/chromium/media/base/media_log_event.h
@@ -82,10 +82,18 @@ struct MediaLogEvent {
// "buffer_end": <last buffered byte>.
BUFFERED_EXTENTS_CHANGED,
- // Errors reported by Media Source Extensions code.
- MEDIA_SOURCE_ERROR,
+ // Error log reported by media code such as details of an MSE parse error.
+ MEDIA_ERROR_LOG_ENTRY,
// params: "error": Error string describing the error detected.
+ // Informative log reported by media code.
+ MEDIA_INFO_LOG_ENTRY,
+ // params: "info": String with details of an informative log entry.
+
+ // Debug log reported by media code.
+ MEDIA_DEBUG_LOG_ENTRY,
+ // params: "debug": String with details of a debug log entry.
+
// A property has changed without any special event occurring.
PROPERTY_CHANGE,
diff --git a/chromium/media/base/media_permission.cc b/chromium/media/base/media_permission.cc
new file mode 100644
index 00000000000..739e4454ae7
--- /dev/null
+++ b/chromium/media/base/media_permission.cc
@@ -0,0 +1,15 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_permission.h"
+
+namespace media {
+
+MediaPermission::MediaPermission() {
+}
+
+MediaPermission::~MediaPermission() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/media_permission.h b/chromium/media/base/media_permission.h
new file mode 100644
index 00000000000..760ba0caa9f
--- /dev/null
+++ b/chromium/media/base/media_permission.h
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_PERMISSION_H_
+#define MEDIA_BASE_MEDIA_PERMISSION_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "media/base/media_export.h"
+
+class GURL;
+
+namespace media {
+
+// Interface to handle media related permission checks and requests.
+class MEDIA_EXPORT MediaPermission {
+ public:
+ typedef base::Callback<void(bool)> PermissionStatusCB;
+
+ enum Type {
+ PROTECTED_MEDIA_IDENTIFIER,
+ };
+
+ MediaPermission();
+ virtual ~MediaPermission();
+
+ // Checks whether |type| is permitted for |security_origion| without
+ // triggering user interaction (e.g. permission prompt). The status will be
+ // |false| if the permission has never been set.
+ virtual void HasPermission(
+ Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) = 0;
+
+ // Requests |type| permission for |security_origion|. This may trigger user
+ // interaction (e.g. permission prompt) if the permission has never been set.
+ virtual void RequestPermission(
+ Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MediaPermission);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_PERMISSION_H_
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index ad7a7eabac0..05a04b713f5 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -9,22 +9,20 @@ namespace switches {
// Allow users to specify a custom buffer size for debugging purpose.
const char kAudioBufferSize[] = "audio-buffer-size";
+// Disables the new vsync driven video renderering path.
+const char kDisableNewVideoRenderer[] = "disable-new-video-renderer";
+
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
-// Bypass autodetection of the upper limit on resolution of streams that can
-// be hardware decoded.
-const char kIgnoreResolutionLimitsForAcceleratedVideoDecode[] =
- "ignore-resolution-limits-for-accelerated-video-decode";
-
#if defined(OS_ANDROID)
// Disables the infobar popup for accessing protected media identifier.
const char kDisableInfobarForProtectedMediaIdentifier[] =
"disable-infobar-for-protected-media-identifier";
-// Enables use of non-compositing MediaDrm decoding by default for Encrypted
-// Media Extensions implementation.
-const char kMediaDrmEnableNonCompositing[] = "mediadrm-enable-non-compositing";
+// Sets the MediaSource player that uses the separate media thread
+const char kEnableMediaThreadForMediaPlayback[] =
+ "enable-media-thread-for-media-playback";
#endif
#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
@@ -88,13 +86,36 @@ const char kWaveOutBuffers[] = "waveout-buffers";
const char kUseCras[] = "use-cras";
#endif
+// Enables the audio thread hang monitor. Allows us to find users in the field
+// who have stuck audio threads. See crbug.com/422522 and crbug.com/478932.
+// TODO(dalecurtis): This should be removed once those issues are resolved.
+const char kEnableAudioHangMonitor[] = "enable-audio-hang-monitor";
+
// Use fake device for Media Stream to replace actual camera and microphone.
const char kUseFakeDeviceForMediaStream[] = "use-fake-device-for-media-stream";
-// Use a raw video file as fake video capture device.
+// Use an .y4m file to play as the webcam. See the comments in
+// media/video/capture/file_video_capture_device.h for more details.
const char kUseFileForFakeVideoCapture[] = "use-file-for-fake-video-capture";
+// Play a .wav file as the microphone. Note that for WebRTC calls we'll treat
+// the bits as if they came from the microphone, which means you should disable
+// audio processing (lest your audio file will play back distorted). The input
+// file is converted to suit Chrome's audio buses if necessary, so most sane
+// .wav files should work.
+const char kUseFileForFakeAudioCapture[] = "use-file-for-fake-audio-capture";
+
// Enables support for inband text tracks in media content.
const char kEnableInbandTextTracks[] = "enable-inband-text-tracks";
+// When running tests on a system without the required hardware or libraries,
+// this flag will cause the tests to fail. Otherwise, they silently succeed.
+const char kRequireAudioHardwareForTesting[] =
+ "require-audio-hardware-for-testing";
+
+// Allows clients to override the threshold for when the media renderer will
+// declare the underflow state for the video stream when audio is present.
+// TODO(dalecurtis): Remove once experiments for http://crbug.com/470940 finish.
+const char kVideoUnderflowThresholdMs[] = "video-underflow-threshold-ms";
+
} // namespace switches
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index 74ba0980e2b..4d7aad7c239 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -14,14 +14,13 @@ namespace switches {
MEDIA_EXPORT extern const char kAudioBufferSize[];
-MEDIA_EXPORT extern const char kVideoThreads[];
+MEDIA_EXPORT extern const char kDisableNewVideoRenderer[];
-MEDIA_EXPORT extern const char
- kIgnoreResolutionLimitsForAcceleratedVideoDecode[];
+MEDIA_EXPORT extern const char kVideoThreads[];
#if defined(OS_ANDROID)
MEDIA_EXPORT extern const char kDisableInfobarForProtectedMediaIdentifier[];
-MEDIA_EXPORT extern const char kMediaDrmEnableNonCompositing[];
+MEDIA_EXPORT extern const char kEnableMediaThreadForMediaPlayback[];
#endif
#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
@@ -47,11 +46,18 @@ MEDIA_EXPORT extern const char kWaveOutBuffers[];
MEDIA_EXPORT extern const char kUseCras[];
#endif
+MEDIA_EXPORT extern const char kEnableAudioHangMonitor[];
+
MEDIA_EXPORT extern const char kUseFakeDeviceForMediaStream[];
MEDIA_EXPORT extern const char kUseFileForFakeVideoCapture[];
+MEDIA_EXPORT extern const char kUseFileForFakeAudioCapture[];
MEDIA_EXPORT extern const char kEnableInbandTextTracks[];
+MEDIA_EXPORT extern const char kRequireAudioHardwareForTesting[];
+
+MEDIA_EXPORT extern const char kVideoUnderflowThresholdMs[];
+
} // namespace switches
#endif // MEDIA_BASE_MEDIA_SWITCHES_H_
diff --git a/chromium/media/base/media_win.cc b/chromium/media/base/media_win.cc
index 43bf6a7f458..03d78b1d454 100644
--- a/chromium/media/base/media_win.cc
+++ b/chromium/media/base/media_win.cc
@@ -12,6 +12,8 @@
#include <delayimp.h>
#include "base/files/file_path.h"
+#include "base/metrics/sparse_histogram.h"
+#include "media/ffmpeg/ffmpeg_common.h"
#pragma comment(lib, "delayimp.lib")
@@ -31,8 +33,23 @@ bool InitializeMediaLibraryInternal(const base::FilePath& module_dir) {
module_dir.AppendASCII(kFFmpegDLL).value().c_str(), NULL,
LOAD_WITH_ALTERED_SEARCH_PATH);
- // Check that we loaded the library successfully.
- return lib != NULL;
+ bool initialized = (lib != NULL);
+
+ // TODO(scherkus): Remove all the bool-ness from these functions as we no
+ // longer support disabling HTML5 media at runtime. http://crbug.com/440892
+ if (!initialized) {
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Media.Initialize.Windows", GetLastError());
+ return false;
+ }
+
+ // VS2013 has a bug where FMA3 instructions will be executed on CPUs that
+ // support them despite them being disabled at the OS level, causing illegal
+ // instruction exceptions. Because Web Audio's FFT code *might* run before
+ // HTML5 media code, call av_log_set_level() to force library initialziation.
+ // See http://crbug.com/440892 for details.
+ av_log_set_level(AV_LOG_QUIET);
+
+ return initialized;
}
} // namespace internal
diff --git a/chromium/media/base/mock_audio_renderer_sink.h b/chromium/media/base/mock_audio_renderer_sink.h
index 5f1c245c428..3a85d528a03 100644
--- a/chromium/media/base/mock_audio_renderer_sink.h
+++ b/chromium/media/base/mock_audio_renderer_sink.h
@@ -21,12 +21,12 @@ class MockAudioRendererSink : public AudioRendererSink {
MOCK_METHOD0(Play, void());
MOCK_METHOD1(SetVolume, bool(double volume));
- virtual void Initialize(const AudioParameters& params,
- RenderCallback* renderer) override;
+ void Initialize(const AudioParameters& params,
+ RenderCallback* renderer) override;
AudioRendererSink::RenderCallback* callback() { return callback_; }
protected:
- virtual ~MockAudioRendererSink();
+ ~MockAudioRendererSink() override;
private:
RenderCallback* callback_;
diff --git a/chromium/media/base/mock_filters.cc b/chromium/media/base/mock_filters.cc
index 0dc6d4e1fd1..0a66c466b9b 100644
--- a/chromium/media/base/mock_filters.cc
+++ b/chromium/media/base/mock_filters.cc
@@ -18,14 +18,20 @@ MockDemuxer::MockDemuxer() {}
MockDemuxer::~MockDemuxer() {}
-MockDemuxerStream::MockDemuxerStream(DemuxerStream::Type type) : type_(type) {}
+MockDemuxerStream::MockDemuxerStream(DemuxerStream::Type type)
+ : type_(type), liveness_(LIVENESS_UNKNOWN) {
+}
MockDemuxerStream::~MockDemuxerStream() {}
-DemuxerStream::Type MockDemuxerStream::type() {
+DemuxerStream::Type MockDemuxerStream::type() const {
return type_;
}
+DemuxerStream::Liveness MockDemuxerStream::liveness() const {
+ return liveness_;
+}
+
AudioDecoderConfig MockDemuxerStream::audio_decoder_config() {
DCHECK_EQ(type_, DemuxerStream::AUDIO);
return audio_decoder_config_;
@@ -48,6 +54,10 @@ void MockDemuxerStream::set_video_decoder_config(
video_decoder_config_ = config;
}
+void MockDemuxerStream::set_liveness(DemuxerStream::Liveness liveness) {
+ liveness_ = liveness;
+}
+
VideoRotation MockDemuxerStream::video_rotation() {
return VIDEO_ROTATION_0;
}
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index d2b9bd6c12c..31358758079 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -34,14 +34,13 @@ class MockDemuxer : public Demuxer {
// Demuxer implementation.
MOCK_METHOD3(Initialize,
void(DemuxerHost* host, const PipelineStatusCB& cb, bool));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
+ MOCK_METHOD1(SetPlaybackRate, void(double playback_rate));
MOCK_METHOD2(Seek, void(base::TimeDelta time, const PipelineStatusCB& cb));
MOCK_METHOD0(Stop, void());
MOCK_METHOD0(OnAudioRendererDisabled, void());
MOCK_METHOD1(GetStream, DemuxerStream*(DemuxerStream::Type));
MOCK_CONST_METHOD0(GetStartTime, base::TimeDelta());
MOCK_CONST_METHOD0(GetTimelineOffset, base::Time());
- MOCK_CONST_METHOD0(GetLiveness, Liveness());
private:
DISALLOW_COPY_AND_ASSIGN(MockDemuxer);
@@ -53,20 +52,23 @@ class MockDemuxerStream : public DemuxerStream {
virtual ~MockDemuxerStream();
// DemuxerStream implementation.
- virtual Type type() override;
+ Type type() const override;
+ Liveness liveness() const override;
MOCK_METHOD1(Read, void(const ReadCB& read_cb));
- virtual AudioDecoderConfig audio_decoder_config() override;
- virtual VideoDecoderConfig video_decoder_config() override;
+ AudioDecoderConfig audio_decoder_config() override;
+ VideoDecoderConfig video_decoder_config() override;
MOCK_METHOD0(EnableBitstreamConverter, void());
MOCK_METHOD0(SupportsConfigChanges, bool());
void set_audio_decoder_config(const AudioDecoderConfig& config);
void set_video_decoder_config(const VideoDecoderConfig& config);
+ void set_liveness(Liveness liveness);
- virtual VideoRotation video_rotation() override;
+ VideoRotation video_rotation() override;
private:
- DemuxerStream::Type type_;
+ Type type_;
+ Liveness liveness_;
AudioDecoderConfig audio_decoder_config_;
VideoDecoderConfig video_decoder_config_;
@@ -119,16 +121,19 @@ class MockVideoRenderer : public VideoRenderer {
virtual ~MockVideoRenderer();
// VideoRenderer implementation.
- MOCK_METHOD8(Initialize, void(DemuxerStream* stream,
- bool low_delay,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb));
+ MOCK_METHOD9(Initialize,
+ void(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb,
+ const base::Closure& waiting_for_decryption_key_cb));
MOCK_METHOD1(Flush, void(const base::Closure& callback));
MOCK_METHOD1(StartPlayingFrom, void(base::TimeDelta));
+ MOCK_METHOD1(OnTimeStateChanged, void(bool));
private:
DISALLOW_COPY_AND_ASSIGN(MockVideoRenderer);
@@ -140,12 +145,15 @@ class MockAudioRenderer : public AudioRenderer {
virtual ~MockAudioRenderer();
// AudioRenderer implementation.
- MOCK_METHOD6(Initialize, void(DemuxerStream* stream,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb));
+ MOCK_METHOD8(Initialize,
+ void(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb));
MOCK_METHOD0(GetTimeSource, TimeSource*());
MOCK_METHOD1(Flush, void(const base::Closure& callback));
MOCK_METHOD0(StartPlaying, void());
@@ -161,20 +169,24 @@ class MockRenderer : public Renderer {
virtual ~MockRenderer();
// Renderer implementation.
- MOCK_METHOD6(Initialize, void(DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
- const StatisticsCB& statistics_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb));
+ MOCK_METHOD7(Initialize,
+ void(DemuxerStreamProvider* demuxer_stream_provider,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb));
MOCK_METHOD1(Flush, void(const base::Closure& flush_cb));
MOCK_METHOD1(StartPlayingFrom, void(base::TimeDelta timestamp));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
+ MOCK_METHOD1(SetPlaybackRate, void(double playback_rate));
MOCK_METHOD1(SetVolume, void(float volume));
MOCK_METHOD0(GetMediaTime, base::TimeDelta());
MOCK_METHOD0(HasAudio, bool());
MOCK_METHOD0(HasVideo, bool());
- MOCK_METHOD1(SetCdm, void(MediaKeys* cdm));
+ MOCK_METHOD2(SetCdm,
+ void(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb));
private:
DISALLOW_COPY_AND_ASSIGN(MockRenderer);
@@ -188,10 +200,12 @@ class MockTimeSource : public TimeSource {
// TimeSource implementation.
MOCK_METHOD0(StartTicking, void());
MOCK_METHOD0(StopTicking, void());
- MOCK_METHOD1(SetPlaybackRate, void(float));
+ MOCK_METHOD1(SetPlaybackRate, void(double));
MOCK_METHOD1(SetMediaTime, void(base::TimeDelta));
MOCK_METHOD0(CurrentMediaTime, base::TimeDelta());
- MOCK_METHOD0(CurrentMediaTimeForSyncingVideo, base::TimeDelta());
+ MOCK_METHOD2(GetWallClockTimes,
+ bool(const std::vector<base::TimeDelta>&,
+ std::vector<base::TimeTicks>*));
private:
DISALLOW_COPY_AND_ASSIGN(MockTimeSource);
diff --git a/chromium/media/base/moving_average.cc b/chromium/media/base/moving_average.cc
new file mode 100644
index 00000000000..b78f4c0e3dc
--- /dev/null
+++ b/chromium/media/base/moving_average.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/moving_average.h"
+
+#include <algorithm>
+
+namespace media {
+
+MovingAverage::MovingAverage(size_t depth)
+ : depth_(depth), count_(0), samples_(depth_) {
+}
+
+MovingAverage::~MovingAverage() {
+}
+
+void MovingAverage::AddSample(base::TimeDelta sample) {
+ // |samples_| is zero-initialized, so |oldest| is also zero before |count_|
+ // exceeds |depth_|.
+ base::TimeDelta& oldest = samples_[count_++ % depth_];
+ total_ += sample - oldest;
+ oldest = sample;
+}
+
+base::TimeDelta MovingAverage::Average() const {
+ DCHECK_GT(count_, 0u);
+
+ // TODO(dalecurtis): Consider limiting |depth| to powers of two so that we can
+ // replace the integer divide with a bit shift operation.
+
+ return total_ / std::min(static_cast<uint64_t>(depth_), count_);
+}
+
+void MovingAverage::Reset() {
+ count_ = 0;
+ total_ = base::TimeDelta();
+ std::fill(samples_.begin(), samples_.end(), base::TimeDelta());
+}
+
+} // namespace media
diff --git a/chromium/media/base/moving_average.h b/chromium/media/base/moving_average.h
new file mode 100644
index 00000000000..18a934e2998
--- /dev/null
+++ b/chromium/media/base/moving_average.h
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MOVING_AVERAGE_H_
+#define MEDIA_BASE_MOVING_AVERAGE_H_
+
+#include <vector>
+
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Simple class for calculating a moving average of fixed size.
+class MEDIA_EXPORT MovingAverage {
+ public:
+ // Creates a MovingAverage instance with space for |depth| samples.
+ explicit MovingAverage(size_t depth);
+ ~MovingAverage();
+
+ // Adds a new sample to the average; replaces the oldest sample if |depth_|
+ // has been exceeded. Updates |total_| to the new sum of values.
+ void AddSample(base::TimeDelta sample);
+
+ // Returns the current average of all held samples.
+ base::TimeDelta Average() const;
+
+ // Resets the state of the class to its initial post-construction state.
+ void Reset();
+
+ size_t count() const { return count_; }
+
+ private:
+ // Maximum number of elements allowed in the average.
+ const size_t depth_;
+
+ // Number of elements seen thus far.
+ uint64_t count_;
+
+ std::vector<base::TimeDelta> samples_;
+ base::TimeDelta total_;
+
+ DISALLOW_COPY_AND_ASSIGN(MovingAverage);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOVING_AVERAGE_H_
diff --git a/chromium/media/base/moving_average_unittest.cc b/chromium/media/base/moving_average_unittest.cc
new file mode 100644
index 00000000000..da8e51922d8
--- /dev/null
+++ b/chromium/media/base/moving_average_unittest.cc
@@ -0,0 +1,36 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+#include "media/base/moving_average.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(MovingAverageTest, Average) {
+ const int kSamples = 5;
+ MovingAverage moving_average(kSamples);
+ moving_average.AddSample(base::TimeDelta::FromSeconds(1));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1), moving_average.Average());
+ for (int i = 0; i < kSamples - 1; ++i)
+ moving_average.AddSample(base::TimeDelta::FromSeconds(1));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1), moving_average.Average());
+
+ for (int i = 0; i < kSamples; ++i) {
+ moving_average.AddSample(base::TimeDelta::FromMilliseconds(500));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(1000 - (i + 1) * 100),
+ moving_average.Average());
+ }
+}
+
+TEST(MovingAverageTest, Reset) {
+ MovingAverage moving_average(2);
+ moving_average.AddSample(base::TimeDelta::FromSeconds(1));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1), moving_average.Average());
+ moving_average.Reset();
+ moving_average.AddSample(base::TimeDelta());
+ EXPECT_EQ(base::TimeDelta(), moving_average.Average());
+}
+
+} // namespace media
diff --git a/chromium/media/base/multi_channel_resampler.cc b/chromium/media/base/multi_channel_resampler.cc
index b6bde7bbfab..b0ab88d6820 100644
--- a/chromium/media/base/multi_channel_resampler.cc
+++ b/chromium/media/base/multi_channel_resampler.cc
@@ -113,4 +113,11 @@ int MultiChannelResampler::ChunkSize() const {
return resamplers_[0]->ChunkSize();
}
+
+double MultiChannelResampler::BufferedFrames() const {
+ DCHECK(!resamplers_.empty());
+ return resamplers_[0]->BufferedFrames();
+}
+
+
} // namespace media
diff --git a/chromium/media/base/multi_channel_resampler.h b/chromium/media/base/multi_channel_resampler.h
index ee3222a9175..84fa9fe3ac1 100644
--- a/chromium/media/base/multi_channel_resampler.h
+++ b/chromium/media/base/multi_channel_resampler.h
@@ -51,6 +51,9 @@ class MEDIA_EXPORT MultiChannelResampler {
// single call to |read_cb_| for more data.
int ChunkSize() const;
+ // See SincResampler::BufferedFrames.
+ double BufferedFrames() const;
+
private:
// SincResampler::ReadCB implementation. ProvideInput() will be called for
// each channel (in channel order) as SincResampler needs more data.
diff --git a/chromium/media/base/null_video_sink.cc b/chromium/media/base/null_video_sink.cc
new file mode 100644
index 00000000000..03a834f3361
--- /dev/null
+++ b/chromium/media/base/null_video_sink.cc
@@ -0,0 +1,95 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/null_video_sink.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+
+namespace media {
+
+NullVideoSink::NullVideoSink(
+ bool clockless,
+ base::TimeDelta interval,
+ const NewFrameCB& new_frame_cb,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : clockless_(clockless),
+ interval_(interval),
+ new_frame_cb_(new_frame_cb),
+ task_runner_(task_runner),
+ started_(false),
+ callback_(nullptr),
+ tick_clock_(&default_tick_clock_),
+ background_render_(false) {
+}
+
+NullVideoSink::~NullVideoSink() {
+ DCHECK(!started_);
+}
+
+void NullVideoSink::Start(RenderCallback* callback) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!started_);
+ callback_ = callback;
+ started_ = true;
+ last_now_ = current_render_time_ = tick_clock_->NowTicks();
+ cancelable_worker_.Reset(
+ base::Bind(&NullVideoSink::CallRender, base::Unretained(this)));
+ task_runner_->PostTask(FROM_HERE, cancelable_worker_.callback());
+}
+
+void NullVideoSink::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ cancelable_worker_.Cancel();
+ started_ = false;
+ if (!stop_cb_.is_null())
+ base::ResetAndReturn(&stop_cb_).Run();
+}
+
+void NullVideoSink::CallRender() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(started_);
+
+ const base::TimeTicks end_of_interval = current_render_time_ + interval_;
+ scoped_refptr<VideoFrame> new_frame = callback_->Render(
+ current_render_time_, end_of_interval, background_render_);
+ const bool is_new_frame = new_frame != last_frame_;
+ last_frame_ = new_frame;
+ if (is_new_frame)
+ new_frame_cb_.Run(new_frame);
+
+ current_render_time_ += interval_;
+
+ if (clockless_) {
+ task_runner_->PostTask(FROM_HERE, cancelable_worker_.callback());
+ return;
+ }
+
+ const base::TimeTicks now = tick_clock_->NowTicks();
+ base::TimeDelta delay;
+ if (last_now_ == now) {
+ // The tick clock is frozen in this case, so don't advance deadline.
+ delay = interval_;
+ current_render_time_ = now;
+ } else {
+ // If we're behind, find the next nearest on time interval.
+ delay = current_render_time_ - now;
+ if (delay < base::TimeDelta())
+ delay += interval_ * (-delay / interval_ + 1);
+ current_render_time_ = now + delay;
+ last_now_ = now;
+ }
+
+ task_runner_->PostDelayedTask(FROM_HERE, cancelable_worker_.callback(),
+ delay);
+}
+
+void NullVideoSink::PaintFrameUsingOldRenderingPath(
+ const scoped_refptr<VideoFrame>& frame) {
+ new_frame_cb_.Run(frame);
+}
+
+} // namespace media
diff --git a/chromium/media/base/null_video_sink.h b/chromium/media/base/null_video_sink.h
new file mode 100644
index 00000000000..66ff79f59f9
--- /dev/null
+++ b/chromium/media/base/null_video_sink.h
@@ -0,0 +1,95 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_NULL_VIDEO_SINK_H_
+#define MEDIA_AUDIO_NULL_VIDEO_SINK_H_
+
+#include "base/cancelable_callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/base/video_renderer_sink.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class NullVideoSink : public VideoRendererSink {
+ public:
+ using NewFrameCB = base::Callback<void(const scoped_refptr<VideoFrame>&)>;
+
+  // Once started, periodically invokes the RenderCallback given to Start()
+  // every |interval| on |task_runner|. If |clockless| is true, the callback
+  // is invoked back to back via repeated post tasks. Optionally, if
+  // specified, |new_frame_cb| is called for each new frame received.
+ NullVideoSink(bool clockless,
+ base::TimeDelta interval,
+ const NewFrameCB& new_frame_cb,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ ~NullVideoSink() override;
+
+ // VideoRendererSink implementation.
+ void Start(RenderCallback* callback) override;
+ void Stop() override;
+ void PaintFrameUsingOldRenderingPath(
+ const scoped_refptr<VideoFrame>& frame) override;
+
+ void set_tick_clock_for_testing(base::TickClock* tick_clock) {
+ tick_clock_ = tick_clock;
+ }
+
+ // Sets |stop_cb_|, which will be fired when Stop() is called.
+ void set_stop_cb(const base::Closure& stop_cb) {
+ stop_cb_ = stop_cb;
+ }
+
+ bool is_started() const { return started_; }
+
+ void set_background_render(bool is_background_rendering) {
+ background_render_ = is_background_rendering;
+ }
+
+ private:
+ // Task that periodically calls Render() to consume video data.
+ void CallRender();
+
+ const bool clockless_;
+ const base::TimeDelta interval_;
+ const NewFrameCB new_frame_cb_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ bool started_;
+ RenderCallback* callback_;
+
+ // Manages cancellation of periodic Render() callback task.
+ base::CancelableClosure cancelable_worker_;
+
+ // Used to determine when a new frame is received.
+ scoped_refptr<VideoFrame> last_frame_;
+
+ // Used to determine the interval given to RenderCallback::Render() as well as
+ // to maintain stable periodicity of callbacks.
+ base::TimeTicks current_render_time_;
+
+ // Allow for an injectable tick clock for testing.
+ base::DefaultTickClock default_tick_clock_;
+ base::TimeTicks last_now_;
+
+ // If specified, used instead of |default_tick_clock_|.
+ base::TickClock* tick_clock_;
+
+ // If set, called when Stop() is called.
+ base::Closure stop_cb_;
+
+ // Value passed to RenderCallback::Render().
+ bool background_render_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullVideoSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_NULL_VIDEO_SINK_H_
diff --git a/chromium/media/base/null_video_sink_unittest.cc b/chromium/media/base/null_video_sink_unittest.cc
new file mode 100644
index 00000000000..0279695a214
--- /dev/null
+++ b/chromium/media/base/null_video_sink_unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/null_video_sink.h"
+#include "media/base/test_helpers.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+
+namespace media {
+
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
+class NullVideoSinkTest : public testing::Test,
+ public VideoRendererSink::RenderCallback {
+ public:
+ NullVideoSinkTest() {
+ // Never use null TimeTicks since they have special connotations.
+ tick_clock_.Advance(base::TimeDelta::FromMicroseconds(12345));
+ }
+ ~NullVideoSinkTest() override {}
+
+ scoped_ptr<NullVideoSink> ConstructSink(bool clockless,
+ base::TimeDelta interval) {
+ scoped_ptr<NullVideoSink> new_sink(new NullVideoSink(
+ clockless, interval,
+ base::Bind(&NullVideoSinkTest::FrameReceived, base::Unretained(this)),
+ message_loop_.task_runner()));
+ new_sink->set_tick_clock_for_testing(&tick_clock_);
+ return new_sink;
+ }
+
+ scoped_refptr<VideoFrame> CreateFrame(base::TimeDelta timestamp) {
+ const gfx::Size natural_size(8, 8);
+ return VideoFrame::CreateFrame(VideoFrame::YV12, natural_size,
+ gfx::Rect(natural_size), natural_size,
+ timestamp);
+ }
+
+ // VideoRendererSink::RenderCallback implementation.
+ MOCK_METHOD3(Render,
+ scoped_refptr<VideoFrame>(base::TimeTicks,
+ base::TimeTicks,
+ bool));
+ MOCK_METHOD0(OnFrameDropped, void());
+
+ MOCK_METHOD1(FrameReceived, void(const scoped_refptr<VideoFrame>&));
+
+ protected:
+ base::MessageLoop message_loop_;
+ base::SimpleTestTickClock tick_clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullVideoSinkTest);
+};
+
+TEST_F(NullVideoSinkTest, BasicFunctionality) {
+ const base::TimeDelta kInterval = base::TimeDelta::FromMilliseconds(25);
+
+ scoped_ptr<NullVideoSink> sink = ConstructSink(false, kInterval);
+ scoped_refptr<VideoFrame> test_frame = CreateFrame(base::TimeDelta());
+
+ // The sink shouldn't have to be started to use the paint method.
+ EXPECT_CALL(*this, FrameReceived(test_frame));
+ sink->PaintFrameUsingOldRenderingPath(test_frame);
+
+ {
+ SCOPED_TRACE("Waiting for sink startup.");
+ sink->Start(this);
+ const base::TimeTicks current_time = tick_clock_.NowTicks();
+ const base::TimeTicks current_interval_end = current_time + kInterval;
+ EXPECT_CALL(*this, Render(current_time, current_interval_end, false))
+ .WillOnce(Return(test_frame));
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(*this, FrameReceived(test_frame))
+ .WillOnce(RunClosure(event.GetClosure()));
+ event.RunAndWait();
+ }
+
+ // Verify that toggling background rendering mode issues the right bit to
+ // each Render() call.
+ sink->set_background_render(true);
+
+ // A second call returning the same frame should not result in a new call to
+ // FrameReceived().
+ {
+ SCOPED_TRACE("Waiting for second render call.");
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(*this, Render(_, _, true))
+ .WillOnce(Return(test_frame))
+ .WillOnce(Return(nullptr));
+ EXPECT_CALL(*this, FrameReceived(test_frame)).Times(0);
+ EXPECT_CALL(*this, FrameReceived(scoped_refptr<VideoFrame>()))
+ .WillOnce(RunClosure(event.GetClosure()));
+ event.RunAndWait();
+ }
+
+ {
+ SCOPED_TRACE("Waiting for stop event.");
+ WaitableMessageLoopEvent event;
+ sink->set_stop_cb(event.GetClosure());
+ sink->Stop();
+ event.RunAndWait();
+ }
+}
+
+TEST_F(NullVideoSinkTest, ClocklessFunctionality) {
+ // Construct the sink with a huge interval, it should still complete quickly.
+ const base::TimeDelta interval = base::TimeDelta::FromSeconds(10);
+ scoped_ptr<NullVideoSink> sink = ConstructSink(true, interval);
+
+ scoped_refptr<VideoFrame> test_frame = CreateFrame(base::TimeDelta());
+ sink->Start(this);
+
+ EXPECT_CALL(*this, FrameReceived(test_frame)).Times(1);
+ EXPECT_CALL(*this, FrameReceived(scoped_refptr<VideoFrame>())).Times(1);
+
+ const int kTestRuns = 6;
+ const base::TimeTicks now = base::TimeTicks::Now();
+ const base::TimeTicks current_time = tick_clock_.NowTicks();
+
+ SCOPED_TRACE("Waiting for multiple render callbacks");
+ WaitableMessageLoopEvent event;
+ for (int i = 0; i < kTestRuns; ++i) {
+ if (i < kTestRuns - 1) {
+ EXPECT_CALL(*this, Render(current_time + i * interval,
+ current_time + (i + 1) * interval, false))
+ .WillOnce(Return(test_frame));
+ } else {
+ EXPECT_CALL(*this, Render(current_time + i * interval,
+ current_time + (i + 1) * interval, false))
+ .WillOnce(DoAll(RunClosure(event.GetClosure()), Return(nullptr)));
+ }
+ }
+ event.RunAndWait();
+ ASSERT_LT(base::TimeTicks::Now() - now, kTestRuns * interval);
+ sink->Stop();
+}
+
+} // namespace media
diff --git a/chromium/media/base/pipeline.cc b/chromium/media/base/pipeline.cc
index 05971052537..4a397049fc0 100644
--- a/chromium/media/base/pipeline.cc
+++ b/chromium/media/base/pipeline.cc
@@ -37,13 +37,13 @@ Pipeline::Pipeline(
running_(false),
did_loading_progress_(false),
volume_(1.0f),
- playback_rate_(0.0f),
+ playback_rate_(0.0),
status_(PIPELINE_OK),
- is_initialized_(false),
state_(kCreated),
renderer_ended_(false),
text_renderer_ended_(false),
demuxer_(NULL),
+ pending_cdm_context_(nullptr),
weak_factory_(this) {
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
media_log_->AddEvent(
@@ -69,7 +69,8 @@ void Pipeline::Start(Demuxer* demuxer,
const PipelineMetadataCB& metadata_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& duration_change_cb,
- const AddTextTrackCB& add_text_track_cb) {
+ const AddTextTrackCB& add_text_track_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
DCHECK(!ended_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK(!seek_cb.is_null());
@@ -89,6 +90,7 @@ void Pipeline::Start(Demuxer* demuxer,
buffering_state_cb_ = buffering_state_cb;
duration_change_cb_ = duration_change_cb;
add_text_track_cb_ = add_text_track_cb;
+ waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
task_runner_->PostTask(
FROM_HERE, base::Bind(&Pipeline::StartTask, weak_factory_.GetWeakPtr()));
@@ -119,13 +121,13 @@ bool Pipeline::IsRunning() const {
return running_;
}
-float Pipeline::GetPlaybackRate() const {
+double Pipeline::GetPlaybackRate() const {
base::AutoLock auto_lock(lock_);
return playback_rate_;
}
-void Pipeline::SetPlaybackRate(float playback_rate) {
- if (playback_rate < 0.0f)
+void Pipeline::SetPlaybackRate(double playback_rate) {
+ if (playback_rate < 0.0)
return;
base::AutoLock auto_lock(lock_);
@@ -159,11 +161,8 @@ void Pipeline::SetVolume(float volume) {
TimeDelta Pipeline::GetMediaTime() const {
base::AutoLock auto_lock(lock_);
- if (!renderer_)
- return TimeDelta();
-
- TimeDelta media_time = renderer_->GetMediaTime();
- return std::min(media_time, duration_);
+ return renderer_ ? std::min(renderer_->GetMediaTime(), duration_)
+ : TimeDelta();
}
Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() const {
@@ -188,6 +187,13 @@ PipelineStatistics Pipeline::GetStatistics() const {
return statistics_;
}
+void Pipeline::SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) {
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Pipeline::SetCdmTask, weak_factory_.GetWeakPtr(),
+ cdm_context, cdm_attached_cb));
+}
+
void Pipeline::SetErrorForTesting(PipelineStatus status) {
OnError(status);
}
@@ -295,15 +301,6 @@ void Pipeline::SetDuration(TimeDelta duration) {
duration_change_cb_.Run();
}
-void Pipeline::OnStateTransition(PipelineStatus status) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- // Force post to process state transitions after current execution frame.
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(
- &Pipeline::StateTransitionTask, weak_factory_.GetWeakPtr(), status));
-}
-
void Pipeline::StateTransitionTask(PipelineStatus status) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -327,7 +324,7 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
pending_callbacks_.reset();
PipelineStatusCB done_cb =
- base::Bind(&Pipeline::OnStateTransition, weak_factory_.GetWeakPtr());
+ base::Bind(&Pipeline::StateTransitionTask, weak_factory_.GetWeakPtr());
// Switch states, performing any entrance actions for the new state as well.
SetState(GetNextState());
@@ -336,24 +333,25 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
return InitializeDemuxer(done_cb);
case kInitRenderer:
- return InitializeRenderer(base::Bind(done_cb, PIPELINE_OK));
+      // Reaching kInitRenderer means the demuxer has finished parsing the
+      // initialization info, so report metadata now. Otherwise, a 'decode'
+      // error hit while parsing a media segment, before WebMediaPlayer's
+      // ready_state_ has left ReadyStateHaveNothing, would be reported as
+      // NetworkStateFormatError instead of NetworkStateDecodeError.
+ ReportMetadata();
+ start_timestamp_ = demuxer_->GetStartTime();
- case kPlaying:
- // Report metadata the first time we enter the playing state.
- if (!is_initialized_) {
- is_initialized_ = true;
- ReportMetadata();
- start_timestamp_ = demuxer_->GetStartTime();
- }
-
- base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+ return InitializeRenderer(done_cb);
+ case kPlaying:
DCHECK(start_timestamp_ >= base::TimeDelta());
renderer_->StartPlayingFrom(start_timestamp_);
if (text_renderer_)
text_renderer_->StartPlaying();
+ base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+
PlaybackRateChangedTask(GetPlaybackRate());
VolumeChangedTask(GetVolume());
return;
@@ -493,6 +491,12 @@ void Pipeline::StartTask() {
base::Bind(&Pipeline::OnTextRendererEnded, weak_factory_.GetWeakPtr()));
}
+ // Set CDM early to avoid unnecessary delay in Renderer::Initialize().
+ if (pending_cdm_context_) {
+ renderer_->SetCdm(pending_cdm_context_, base::Bind(&IgnoreCdmAttached));
+ pending_cdm_context_ = nullptr;
+ }
+
StateTransitionTask(PIPELINE_OK);
}
@@ -548,7 +552,7 @@ void Pipeline::ErrorChangedTask(PipelineStatus error) {
DoStop(base::Bind(&Pipeline::OnStopCompleted, weak_factory_.GetWeakPtr()));
}
-void Pipeline::PlaybackRateChangedTask(float playback_rate) {
+void Pipeline::PlaybackRateChangedTask(double playback_rate) {
DCHECK(task_runner_->BelongsToCurrentThread());
// Playback rate changes are only carried out while playing.
@@ -595,8 +599,20 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
text_renderer_ended_ = false;
start_timestamp_ = seek_timestamp;
- DoSeek(seek_timestamp,
- base::Bind(&Pipeline::OnStateTransition, weak_factory_.GetWeakPtr()));
+ DoSeek(seek_timestamp, base::Bind(&Pipeline::StateTransitionTask,
+ weak_factory_.GetWeakPtr()));
+}
+
+void Pipeline::SetCdmTask(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) {
+ base::AutoLock auto_lock(lock_);
+ if (!renderer_) {
+ pending_cdm_context_ = cdm_context;
+ cdm_attached_cb.Run(true);
+ return;
+ }
+
+ renderer_->SetCdm(cdm_context, cdm_attached_cb);
}
void Pipeline::OnRendererEnded() {
@@ -641,7 +657,7 @@ void Pipeline::RunEndedCallbackIfNeeded() {
scoped_ptr<TextRenderer> Pipeline::CreateTextRenderer() {
DCHECK(task_runner_->BelongsToCurrentThread());
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
if (!cmd_line->HasSwitch(switches::kEnableInbandTextTracks))
return scoped_ptr<media::TextRenderer>();
@@ -676,7 +692,7 @@ void Pipeline::InitializeDemuxer(const PipelineStatusCB& done_cb) {
demuxer_->Initialize(this, done_cb, text_renderer_);
}
-void Pipeline::InitializeRenderer(const base::Closure& done_cb) {
+void Pipeline::InitializeRenderer(const PipelineStatusCB& done_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
if (!demuxer_->GetStream(DemuxerStream::AUDIO) &&
@@ -694,22 +710,25 @@ void Pipeline::InitializeRenderer(const base::Closure& done_cb) {
demuxer_,
done_cb,
base::Bind(&Pipeline::OnUpdateStatistics, weak_this),
+ base::Bind(&Pipeline::BufferingStateChanged, weak_this),
base::Bind(&Pipeline::OnRendererEnded, weak_this),
base::Bind(&Pipeline::OnError, weak_this),
- base::Bind(&Pipeline::BufferingStateChanged, weak_this));
+ waiting_for_decryption_key_cb_);
}
void Pipeline::ReportMetadata() {
DCHECK(task_runner_->BelongsToCurrentThread());
PipelineMetadata metadata;
- metadata.has_audio = renderer_->HasAudio();
- metadata.has_video = renderer_->HasVideo();
metadata.timeline_offset = demuxer_->GetTimelineOffset();
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
if (stream) {
+ metadata.has_video = true;
metadata.natural_size = stream->video_decoder_config().natural_size();
metadata.video_rotation = stream->video_rotation();
}
+ if (demuxer_->GetStream(DemuxerStream::AUDIO)) {
+ metadata.has_audio = true;
+ }
metadata_cb_.Run(metadata);
}
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 71c16e1ee0c..b63be0093bf 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -6,11 +6,13 @@
#define MEDIA_BASE_PIPELINE_H_
#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/default_tick_clock.h"
#include "media/base/buffering_state.h"
+#include "media/base/cdm_context.h"
#include "media/base/demuxer.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -18,7 +20,7 @@
#include "media/base/serial_runner.h"
#include "media/base/text_track.h"
#include "media/base/video_rotation.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace base {
class SingleThreadTaskRunner;
@@ -32,6 +34,7 @@ class Renderer;
class TextRenderer;
class TextTrackConfig;
class TimeDeltaInterpolator;
+class VideoFrame;
// Metadata describing a pipeline once it has been initialized.
struct PipelineMetadata {
@@ -76,6 +79,9 @@ typedef base::Callback<void(PipelineMetadata)> PipelineMetadataCB;
// "Stopped" state.
class MEDIA_EXPORT Pipeline : public DemuxerHost {
public:
+ // Used to paint VideoFrame.
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
+
// Constructs a media pipeline that will execute on |task_runner|.
Pipeline(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log);
@@ -97,6 +103,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// |duration_change_cb| optional callback that will be executed whenever the
// presentation duration changes.
// |add_text_track_cb| will be executed whenever a text track is added.
+ // |waiting_for_decryption_key_cb| will be executed whenever the key needed
+ // to decrypt the stream is not available.
// It is an error to call this method after the pipeline has already started.
void Start(Demuxer* demuxer,
scoped_ptr<Renderer> renderer,
@@ -106,7 +114,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
const PipelineMetadataCB& metadata_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& duration_change_cb,
- const AddTextTrackCB& add_text_track_cb);
+ const AddTextTrackCB& add_text_track_cb,
+ const base::Closure& waiting_for_decryption_key_cb);
// Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
// teardown has completed.
@@ -132,17 +141,17 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
bool IsRunning() const;
// Gets the current playback rate of the pipeline. When the pipeline is
- // started, the playback rate will be 0.0f. A rate of 1.0f indicates
+ // started, the playback rate will be 0.0. A rate of 1.0 indicates
// that the pipeline is rendering the media at the standard rate. Valid
- // values for playback rate are >= 0.0f.
- float GetPlaybackRate() const;
+ // values for playback rate are >= 0.0.
+ double GetPlaybackRate() const;
- // Attempt to adjust the playback rate. Setting a playback rate of 0.0f pauses
- // all rendering of the media. A rate of 1.0f indicates a normal playback
- // rate. Values for the playback rate must be greater than or equal to 0.0f.
+ // Attempt to adjust the playback rate. Setting a playback rate of 0.0 pauses
+ // all rendering of the media. A rate of 1.0 indicates a normal playback
+ // rate. Values for the playback rate must be greater than or equal to 0.0.
//
// TODO(scherkus): What about maximum rate? Does HTML5 specify a max?
- void SetPlaybackRate(float playback_rate);
+ void SetPlaybackRate(double playback_rate);
// Gets the current volume setting being used by the audio renderer. When
// the pipeline is started, this value will be 1.0f. Valid values range
@@ -172,6 +181,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Gets the current pipeline statistics.
PipelineStatistics GetStatistics() const;
+ void SetCdm(CdmContext* cdm_context, const CdmAttachedCB& cdm_attached_cb);
+
void SetErrorForTesting(PipelineStatus status);
bool HasWeakPtrsForTesting() const;
@@ -231,7 +242,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void ErrorChangedTask(PipelineStatus error);
// Carries out notifying filters that the playback rate has changed.
- void PlaybackRateChangedTask(float playback_rate);
+ void PlaybackRateChangedTask(double playback_rate);
// Carries out notifying filters that the volume has changed.
void VolumeChangedTask(float volume);
@@ -239,6 +250,13 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Carries out notifying filters that we are seeking to a new timestamp.
void SeekTask(base::TimeDelta time, const PipelineStatusCB& seek_cb);
+ // Carries out setting the |cdm_context| in |renderer_|, and then fires
+ // |cdm_attached_cb| with the result. If |renderer_| is null,
+ // |cdm_attached_cb| will be fired immediately with true, and |cdm_context|
+ // will be set in |renderer_| later when |renderer_| is created.
+ void SetCdmTask(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb);
+
// Callbacks executed when a renderer has ended.
void OnRendererEnded();
void OnTextRendererEnded();
@@ -260,9 +278,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Kicks off initialization for each media object, executing |done_cb| with
// the result when completed.
void InitializeDemuxer(const PipelineStatusCB& done_cb);
- void InitializeRenderer(const base::Closure& done_cb);
+ void InitializeRenderer(const PipelineStatusCB& done_cb);
- void OnStateTransition(PipelineStatus status);
void StateTransitionTask(PipelineStatus status);
// Initiates an asynchronous pause-flush-seek-preroll call sequence
@@ -302,10 +319,10 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// filters.
float volume_;
- // Current playback rate (>= 0.0f). This value is set immediately via
+ // Current playback rate (>= 0.0). This value is set immediately via
// SetPlaybackRate() and a task is dispatched on the task runner to notify
// the filters.
- float playback_rate_;
+ double playback_rate_;
// Current duration as reported by |demuxer_|.
base::TimeDelta duration_;
@@ -319,8 +336,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// The following data members are only accessed by tasks posted to
// |task_runner_|.
- bool is_initialized_;
-
// Member that tracks the current state.
State state_;
@@ -344,6 +359,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
BufferingStateCB buffering_state_cb_;
base::Closure duration_change_cb_;
AddTextTrackCB add_text_track_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
// Holds the initialized demuxer. Used for seeking. Owned by client.
Demuxer* demuxer_;
@@ -357,6 +373,11 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
scoped_ptr<SerialRunner> pending_callbacks_;
+ // CdmContext to be used to decrypt (and decode) encrypted stream in this
+ // pipeline. Non-null only when SetCdm() is called and the pipeline has not
+ // been started. Then during Start(), this value will be set on |renderer_|.
+ CdmContext* pending_cdm_context_;
+
base::ThreadChecker thread_checker_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/chromium/media/base/pipeline_unittest.cc b/chromium/media/base/pipeline_unittest.cc
index 5bb48ad5716..d4f9fa0984b 100644
--- a/chromium/media/base/pipeline_unittest.cc
+++ b/chromium/media/base/pipeline_unittest.cc
@@ -20,7 +20,7 @@
#include "media/base/text_track_config.h"
#include "media/base/time_delta_interpolator.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
using ::testing::_;
using ::testing::AnyNumber;
@@ -55,6 +55,13 @@ ACTION_P2(SetBufferingState, cb, buffering_state) {
cb->Run(buffering_state);
}
+ACTION_TEMPLATE(PostCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(p0)) {
+ return base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(::std::tr1::get<k>(args), p0));
+}
+
// TODO(scherkus): even though some filters are initialized on separate
// threads these test aren't flaky... why? It's because filters' Initialize()
// is executed on |message_loop_| and the mock filters instantly call
@@ -98,9 +105,6 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(*demuxer_, GetTimelineOffset())
.WillRepeatedly(Return(base::Time()));
- EXPECT_CALL(*demuxer_, GetLiveness())
- .WillRepeatedly(Return(Demuxer::LIVENESS_UNKNOWN));
-
EXPECT_CALL(*renderer_, GetMediaTime())
.WillRepeatedly(Return(base::TimeDelta()));
@@ -141,7 +145,7 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(callbacks_, OnDurationChange());
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(SetDemuxerProperties(duration),
- RunCallback<1>(PIPELINE_OK)));
+ PostCallback<1>(PIPELINE_OK)));
// Configure the demuxer to return the streams.
for (size_t i = 0; i < streams->size(); ++i) {
@@ -165,10 +169,10 @@ class PipelineTest : public ::testing::Test {
// Sets up expectations to allow the video renderer to initialize.
void SetRendererExpectations() {
- EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<3>(&ended_cb_),
- SaveArg<5>(&buffering_state_cb_),
- RunCallback<1>()));
+ EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<3>(&buffering_state_cb_),
+ SaveArg<4>(&ended_cb_),
+ PostCallback<1>(PIPELINE_OK)));
EXPECT_CALL(*renderer_, HasAudio()).WillRepeatedly(Return(audio_stream()));
EXPECT_CALL(*renderer_, HasVideo()).WillRepeatedly(Return(video_stream()));
}
@@ -182,9 +186,9 @@ class PipelineTest : public ::testing::Test {
}
void StartPipeline() {
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey()).Times(0);
pipeline_->Start(
- demuxer_.get(),
- scoped_renderer_.Pass(),
+ demuxer_.get(), scoped_renderer_.Pass(),
base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
@@ -193,7 +197,9 @@ class PipelineTest : public ::testing::Test {
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)),
- base::Bind(&PipelineTest::OnAddTextTrack, base::Unretained(this)));
+ base::Bind(&PipelineTest::OnAddTextTrack, base::Unretained(this)),
+ base::Bind(&PipelineTest::OnWaitingForDecryptionKey,
+ base::Unretained(this)));
}
// Sets up expectations on the callback and initializes the pipeline. Called
@@ -203,7 +209,7 @@ class PipelineTest : public ::testing::Test {
if (start_status == PIPELINE_OK) {
EXPECT_CALL(callbacks_, OnMetadata(_)).WillOnce(SaveArg<0>(&metadata_));
- EXPECT_CALL(*renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*renderer_, SetPlaybackRate(0.0));
EXPECT_CALL(*renderer_, SetVolume(1.0f));
EXPECT_CALL(*renderer_, StartPlayingFrom(start_time_))
.WillOnce(SetBufferingState(&buffering_state_cb_,
@@ -290,6 +296,7 @@ class PipelineTest : public ::testing::Test {
MOCK_METHOD2(OnAddTextTrack, void(const TextTrackConfig&,
const AddTextTrackDoneCB&));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
void DoOnAddTextTrack(const TextTrackConfig& config,
const AddTextTrackDoneCB& done_cb) {
@@ -330,9 +337,9 @@ TEST_F(PipelineTest, NotStarted) {
// Setting should still work.
EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
- pipeline_->SetPlaybackRate(-1.0f);
+ pipeline_->SetPlaybackRate(-1.0);
EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
- pipeline_->SetPlaybackRate(1.0f);
+ pipeline_->SetPlaybackRate(1.0);
EXPECT_EQ(1.0f, pipeline_->GetPlaybackRate());
// Setting should still work.
@@ -373,7 +380,7 @@ TEST_F(PipelineTest, StopWithoutStart) {
TEST_F(PipelineTest, StartThenStopImmediately) {
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(PostCallback<1>(PIPELINE_OK));
EXPECT_CALL(*demuxer_, Stop());
EXPECT_CALL(callbacks_, OnStart(_));
@@ -407,7 +414,7 @@ TEST_F(PipelineTest, DemuxerErrorDuringStop) {
TEST_F(PipelineTest, URLNotFound) {
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
+ .WillOnce(PostCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
EXPECT_CALL(*demuxer_, Stop());
StartPipelineAndExpect(PIPELINE_ERROR_URL_NOT_FOUND);
@@ -415,8 +422,9 @@ TEST_F(PipelineTest, URLNotFound) {
TEST_F(PipelineTest, NoStreams) {
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(PostCallback<1>(PIPELINE_OK));
EXPECT_CALL(*demuxer_, Stop());
+ EXPECT_CALL(callbacks_, OnMetadata(_));
StartPipelineAndExpect(PIPELINE_ERROR_COULD_NOT_RENDER);
}
@@ -632,7 +640,7 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
SetRendererExpectations();
StartPipelineAndExpect(PIPELINE_OK);
- float playback_rate = 1.0f;
+ double playback_rate = 1.0;
EXPECT_CALL(*renderer_, SetPlaybackRate(playback_rate));
pipeline_->SetPlaybackRate(playback_rate);
message_loop_.RunUntilIdle();
@@ -667,7 +675,7 @@ static void TestNoCallsAfterError(
EXPECT_TRUE(message_loop->IsIdleForTesting());
// Make calls on pipeline after error has occurred.
- pipeline->SetPlaybackRate(0.5f);
+ pipeline->SetPlaybackRate(0.5);
pipeline->SetVolume(0.5f);
// No additional tasks should be queued as a result of these calls.
@@ -823,12 +831,12 @@ class PipelineTeardownTest : public PipelineTest {
if (stop_or_error == kStop) {
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
- RunCallback<1>(PIPELINE_OK)));
+ PostCallback<1>(PIPELINE_OK)));
ExpectPipelineStopAndDestroyPipeline();
} else {
status = DEMUXER_ERROR_COULD_NOT_OPEN;
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(status));
+ .WillOnce(PostCallback<1>(status));
}
EXPECT_CALL(*demuxer_, Stop());
@@ -847,28 +855,29 @@ class PipelineTeardownTest : public PipelineTest {
if (state == kInitRenderer) {
if (stop_or_error == kStop) {
- EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _))
+ EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
- RunCallback<1>()));
+ PostCallback<1>(PIPELINE_OK)));
ExpectPipelineStopAndDestroyPipeline();
} else {
status = PIPELINE_ERROR_INITIALIZATION_FAILED;
- EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _))
- .WillOnce(DoAll(RunCallback<4>(status), RunCallback<1>()));
+ EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _, _))
+ .WillOnce(PostCallback<1>(status));
}
EXPECT_CALL(*demuxer_, Stop());
+ EXPECT_CALL(callbacks_, OnMetadata(_));
return status;
}
- EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<5>(&buffering_state_cb_),
- RunCallback<1>()));
+ EXPECT_CALL(*renderer_, Initialize(_, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<3>(&buffering_state_cb_),
+ PostCallback<1>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnMetadata(_));
// If we get here it's a successful initialization.
- EXPECT_CALL(*renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*renderer_, SetPlaybackRate(0.0));
EXPECT_CALL(*renderer_, SetVolume(1.0f));
EXPECT_CALL(*renderer_, StartPlayingFrom(base::TimeDelta()))
.WillOnce(SetBufferingState(&buffering_state_cb_,
diff --git a/chromium/media/base/renderer.h b/chromium/media/base/renderer.h
index 78ae043ee30..75372fba3a1 100644
--- a/chromium/media/base/renderer.h
+++ b/chromium/media/base/renderer.h
@@ -6,18 +6,21 @@
#define MEDIA_BASE_RENDERER_H_
#include "base/callback.h"
+#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/buffering_state.h"
+#include "media/base/cdm_context.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
namespace media {
-class MediaKeys;
class DemuxerStreamProvider;
+class VideoFrame;
class MEDIA_EXPORT Renderer {
public:
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
typedef base::Callback<base::TimeDelta()> TimeDeltaCB;
Renderer();
@@ -26,21 +29,32 @@ class MEDIA_EXPORT Renderer {
virtual ~Renderer();
// Initializes the Renderer with |demuxer_stream_provider|, executing
- // |init_cb| upon completion. If initialization failed, fires |error_cb|
- // before |init_cb|. |demuxer_stream_provider| must be valid throughout the
- // lifetime of the Renderer object.
+ // |init_cb| upon completion. If initialization fails, only |init_cb| (not
+ // |error_cb|) should be called. |demuxer_stream_provider| must be valid for
+ // the lifetime of the Renderer object. |init_cb| must only be run after this
+ // method has returned. Firing |init_cb| may result in the immediate
+ // destruction of the caller, so it must be run only prior to returning.
//
// Permanent callbacks:
// - |statistics_cb|: Executed periodically with rendering statistics.
- // - |time_cb|: Executed whenever time has advanced through rendering.
+ // - |buffering_state_cb|: Executed when buffering state is changed.
// - |ended_cb|: Executed when rendering has reached the end of stream.
- // - |error_cb|: Executed if any error was encountered during rendering.
- virtual void Initialize(DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
- const StatisticsCB& statistics_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb) = 0;
+ // - |error_cb|: Executed if any error was encountered after initialization.
+ // - |waiting_for_decryption_key_cb|: Executed whenever the key needed to
+ // decrypt the stream is not available.
+ virtual void Initialize(
+ DemuxerStreamProvider* demuxer_stream_provider,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb) = 0;
+
+ // Associates the |cdm_context| with this Renderer for decryption (and
+ // decoding) of media data, then fires |cdm_attached_cb| with the result.
+ virtual void SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) = 0;
// The following functions must be called after Initialize().
@@ -51,7 +65,7 @@ class MEDIA_EXPORT Renderer {
virtual void StartPlayingFrom(base::TimeDelta time) = 0;
// Updates the current playback rate. The default playback rate should be 1.
- virtual void SetPlaybackRate(float playback_rate) = 0;
+ virtual void SetPlaybackRate(double playback_rate) = 0;
// Sets the output volume. The default volume should be 1.
virtual void SetVolume(float volume) = 0;
@@ -65,9 +79,6 @@ class MEDIA_EXPORT Renderer {
// Returns whether |this| renders video.
virtual bool HasVideo() = 0;
- // Associates the |cdm| with this Renderer.
- virtual void SetCdm(MediaKeys* cdm) = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(Renderer);
};
diff --git a/chromium/media/base/renderer_factory.cc b/chromium/media/base/renderer_factory.cc
new file mode 100644
index 00000000000..2824baabb6b
--- /dev/null
+++ b/chromium/media/base/renderer_factory.cc
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/renderer_factory.h"
+
+namespace media {
+
+RendererFactory::RendererFactory() {
+}
+
+RendererFactory::~RendererFactory() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/renderer_factory.h b/chromium/media/base/renderer_factory.h
new file mode 100644
index 00000000000..aea3a53496e
--- /dev/null
+++ b/chromium/media/base/renderer_factory.h
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_RENDERER_FACTORY_H_
+#define MEDIA_BASE_RENDERER_FACTORY_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+#include "media/base/renderer.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class AudioRendererSink;
+class VideoRendererSink;
+
+// A factory class for creating media::Renderer to be used by media pipeline.
+class MEDIA_EXPORT RendererFactory {
+ public:
+ RendererFactory();
+ virtual ~RendererFactory();
+
+ // Creates and returns a Renderer. All methods of the created Renderer except
+ // for GetMediaTime() will be called on the |media_task_runner|.
+ // GetMediaTime() could be called on any thread.
+ // The created Renderer can use |audio_renderer_sink| to render audio and
+ // |video_renderer_sink| to render video.
+ virtual scoped_ptr<Renderer> CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RendererFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_RENDERER_FACTORY_H_
diff --git a/chromium/media/base/sample_format.cc b/chromium/media/base/sample_format.cc
index cf8f20f5632..464fc1b6f57 100644
--- a/chromium/media/base/sample_format.cc
+++ b/chromium/media/base/sample_format.cc
@@ -20,6 +20,7 @@ int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
case kSampleFormatS32:
case kSampleFormatF32:
case kSampleFormatPlanarF32:
+ case kSampleFormatPlanarS32:
return 4;
}
@@ -43,6 +44,8 @@ const char* SampleFormatToString(SampleFormat sample_format) {
return "Signed 16-bit planar";
case kSampleFormatPlanarF32:
return "Float 32-bit planar";
+ case kSampleFormatPlanarS32:
+ return "Signed 32-bit planar";
}
NOTREACHED() << "Invalid sample format provided: " << sample_format;
return "";
diff --git a/chromium/media/base/sample_format.h b/chromium/media/base/sample_format.h
index 7c3df702157..7a488433ab8 100644
--- a/chromium/media/base/sample_format.h
+++ b/chromium/media/base/sample_format.h
@@ -21,9 +21,10 @@ enum SampleFormat {
kSampleFormatF32, // Float 32-bit.
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
+ kSampleFormatPlanarS32, // Signed 32-bit planar.
// Must always be equal to largest value ever logged.
- kSampleFormatMax = kSampleFormatPlanarF32,
+ kSampleFormatMax = kSampleFormatPlanarS32,
};
// Returns the number of bytes used per channel for the specified
diff --git a/chromium/media/base/scoped_histogram_timer.h b/chromium/media/base/scoped_histogram_timer.h
deleted file mode 100644
index a7f69b0ab9b..00000000000
--- a/chromium/media/base/scoped_histogram_timer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
-#define MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
-
-#include "base/metrics/histogram.h"
-#include "base/time/time.h"
-
-// Scoped class which logs its time on this earth as a UMA statistic. Must be
-// a #define macro since UMA macros prevent variables as names. The nested
-// macro is necessary to expand __COUNTER__ to an actual value.
-#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
- SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, __COUNTER__)
-
-#define SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, key) \
- SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, key)
-
-#define SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, key) \
- class ScopedHistogramTimer##key { \
- public: \
- ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
- ~ScopedHistogramTimer##key() { \
- base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
- UMA_HISTOGRAM_TIMES(name, elapsed); \
- } \
- private: \
- base::TimeTicks constructed_; \
- } scoped_histogram_timer_##key
-
-#endif // MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
diff --git a/chromium/media/base/scoped_histogram_timer_unittest.cc b/chromium/media/base/scoped_histogram_timer_unittest.cc
deleted file mode 100644
index 47e228eb111..00000000000
--- a/chromium/media/base/scoped_histogram_timer_unittest.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/time/time.h"
-#include "media/base/scoped_histogram_timer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-TEST(ScopedHistogramTimer, TwoTimersOneScope) {
- SCOPED_UMA_HISTOGRAM_TIMER("TestTimer0");
- SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
-}
-
-} // namespace media
diff --git a/chromium/media/base/serial_runner.cc b/chromium/media/base/serial_runner.cc
index 779566c7941..9e5993071e4 100644
--- a/chromium/media/base/serial_runner.cc
+++ b/chromium/media/base/serial_runner.cc
@@ -7,8 +7,8 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
namespace media {
@@ -68,7 +68,7 @@ bool SerialRunner::Queue::empty() {
SerialRunner::SerialRunner(const Queue& bound_fns,
const PipelineStatusCB& done_cb)
- : task_runner_(base::MessageLoopProxy::current()),
+ : task_runner_(base::ThreadTaskRunnerHandle::Get()),
bound_fns_(bound_fns),
done_cb_(done_cb),
weak_factory_(this) {
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
index 14778bbd58d..1b07598e4db 100644
--- a/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
@@ -4,7 +4,6 @@
#include "build/build_config.h"
#include "media/base/simd/convert_rgb_to_yuv.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
#if defined(COMPILER_MSVC)
#include <intrin.h>
@@ -13,6 +12,12 @@
#include <emmintrin.h>
#endif
+#if defined(COMPILER_MSVC)
+#define SIMD_ALIGNED(var) __declspec(align(16)) var
+#else
+#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
+#endif
+
namespace media {
#define FIX_SHIFT 12
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb.h b/chromium/media/base/simd/convert_yuv_to_rgb.h
index 54337bcd2cb..7feb0079c38 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb.h
+++ b/chromium/media/base/simd/convert_yuv_to_rgb.h
@@ -29,7 +29,7 @@ MEDIA_EXPORT void ConvertYUVToRGB32Row_C(const uint8* yplane,
const uint8* vplane,
uint8* rgbframe,
ptrdiff_t width,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
MEDIA_EXPORT void ConvertYUVAToARGB_C(const uint8* yplane,
const uint8* uplane,
@@ -50,7 +50,7 @@ MEDIA_EXPORT void ConvertYUVAToARGBRow_C(const uint8* yplane,
const uint8* aplane,
uint8* rgbframe,
ptrdiff_t width,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
MEDIA_EXPORT void ConvertYUVToRGB32_SSE(const uint8* yplane,
const uint8* uplane,
@@ -82,16 +82,15 @@ MEDIA_EXPORT void ScaleYUVToRGB32Row_C(const uint8* y_buf,
uint8* rgb_buf,
ptrdiff_t width,
ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_C(
- const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16* convert_table);
MEDIA_EXPORT void LinearScaleYUVToRGB32RowWithRange_C(
const uint8* y_buf,
@@ -101,7 +100,7 @@ MEDIA_EXPORT void LinearScaleYUVToRGB32RowWithRange_C(
int dest_width,
int source_x,
int source_dx,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
} // namespace media
@@ -121,14 +120,14 @@ MEDIA_EXPORT void ConvertYUVAToARGBRow_MMX(const uint8* yplane,
const uint8* aplane,
uint8* rgbframe,
ptrdiff_t width,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
MEDIA_EXPORT void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
uint8* rgbframe,
ptrdiff_t width,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
const uint8* u_buf,
@@ -136,34 +135,31 @@ MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
uint8* rgb_buf,
ptrdiff_t width,
ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
-
-MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE2_X64(
- const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_SSE(
- const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX_X64(
- const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx,
- const int16 convert_table[1024][4]);
+ const int16* convert_table);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16* convert_table);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16* convert_table);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX_X64(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16* convert_table);
} // extern "C"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
index 9d6476b07d5..370f80e783b 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
namespace media {
@@ -39,21 +38,21 @@ static inline void ConvertYUVToRGB32_C(uint8 y,
uint8 u,
uint8 v,
uint8* rgb_buf,
- const int16 convert_table[1024][4]) {
- int b = convert_table[256+u][B_INDEX];
- int g = convert_table[256+u][G_INDEX];
- int r = convert_table[256+u][R_INDEX];
- int a = convert_table[256+u][A_INDEX];
+ const int16* convert_table) {
+ int b = convert_table[4 * (256 + u) + B_INDEX];
+ int g = convert_table[4 * (256 + u) + G_INDEX];
+ int r = convert_table[4 * (256 + u) + R_INDEX];
+ int a = convert_table[4 * (256 + u) + A_INDEX];
- b = paddsw(b, convert_table[512+v][B_INDEX]);
- g = paddsw(g, convert_table[512+v][G_INDEX]);
- r = paddsw(r, convert_table[512+v][R_INDEX]);
- a = paddsw(a, convert_table[512+v][A_INDEX]);
+ b = paddsw(b, convert_table[4 * (512 + v) + B_INDEX]);
+ g = paddsw(g, convert_table[4 * (512 + v) + G_INDEX]);
+ r = paddsw(r, convert_table[4 * (512 + v) + R_INDEX]);
+ a = paddsw(a, convert_table[4 * (512 + v) + A_INDEX]);
- b = paddsw(b, convert_table[y][B_INDEX]);
- g = paddsw(g, convert_table[y][G_INDEX]);
- r = paddsw(r, convert_table[y][R_INDEX]);
- a = paddsw(a, convert_table[y][A_INDEX]);
+ b = paddsw(b, convert_table[4 * y + B_INDEX]);
+ g = paddsw(g, convert_table[4 * y + G_INDEX]);
+ r = paddsw(r, convert_table[4 * y + R_INDEX]);
+ a = paddsw(a, convert_table[4 * y + A_INDEX]);
b >>= 6;
g >>= 6;
@@ -71,18 +70,18 @@ static inline void ConvertYUVAToARGB_C(uint8 y,
uint8 v,
uint8 a,
uint8* rgb_buf,
- const int16 convert_table[1024][4]) {
- int b = convert_table[256+u][0];
- int g = convert_table[256+u][1];
- int r = convert_table[256+u][2];
+ const int16* convert_table) {
+ int b = convert_table[4 * (256 + u) + 0];
+ int g = convert_table[4 * (256 + u) + 1];
+ int r = convert_table[4 * (256 + u) + 2];
- b = paddsw(b, convert_table[512+v][0]);
- g = paddsw(g, convert_table[512+v][1]);
- r = paddsw(r, convert_table[512+v][2]);
+ b = paddsw(b, convert_table[4 * (512 + v) + 0]);
+ g = paddsw(g, convert_table[4 * (512 + v) + 1]);
+ r = paddsw(r, convert_table[4 * (512 + v) + 2]);
- b = paddsw(b, convert_table[y][0]);
- g = paddsw(g, convert_table[y][1]);
- r = paddsw(r, convert_table[y][2]);
+ b = paddsw(b, convert_table[4 * y + 0]);
+ g = paddsw(g, convert_table[4 * y + 1]);
+ r = paddsw(r, convert_table[4 * y + 2]);
b >>= 6;
g >>= 6;
@@ -103,7 +102,7 @@ void ConvertYUVToRGB32Row_C(const uint8* y_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- const int16 convert_table[1024][4]) {
+ const int16* convert_table) {
for (int x = 0; x < width; x += 2) {
uint8 u = u_buf[x >> 1];
uint8 v = v_buf[x >> 1];
@@ -123,7 +122,7 @@ void ConvertYUVAToARGBRow_C(const uint8* y_buf,
const uint8* a_buf,
uint8* rgba_buf,
ptrdiff_t width,
- const int16 convert_table[1024][4]) {
+ const int16* convert_table) {
for (int x = 0; x < width; x += 2) {
uint8 u = u_buf[x >> 1];
uint8 v = v_buf[x >> 1];
@@ -149,7 +148,7 @@ void ScaleYUVToRGB32Row_C(const uint8* y_buf,
uint8* rgb_buf,
ptrdiff_t width,
ptrdiff_t source_dx,
- const int16 convert_table[1024][4]) {
+ const int16* convert_table) {
int x = 0;
for (int i = 0; i < width; i += 2) {
int y = y_buf[x >> 16];
@@ -172,7 +171,7 @@ void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
uint8* rgb_buf,
ptrdiff_t width,
ptrdiff_t source_dx,
- const int16 convert_table[1024][4]) {
+ const int16* convert_table) {
// Avoid point-sampling for down-scaling by > 2:1.
int source_x = 0;
if (source_dx >= 0x20000)
@@ -188,7 +187,7 @@ void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
int dest_width,
int x,
int source_dx,
- const int16 convert_table[1024][4]) {
+ const int16* convert_table) {
for (int i = 0; i < dest_width; i += 2) {
int y0 = y_buf[x >> 16];
int y1 = y_buf[(x >> 16) + 1];
@@ -226,6 +225,7 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
int rgbstride,
YUVType yuv_type) {
unsigned int y_shift = GetVerticalShift(yuv_type);
+ const int16* lookup_table = GetLookupTable(yuv_type);
for (int y = 0; y < height; ++y) {
uint8* rgb_row = rgbframe + y * rgbstride;
const uint8* y_ptr = yplane + y * ystride;
@@ -237,7 +237,7 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
v_ptr,
rgb_row,
width,
- GetLookupTable(yuv_type));
+ lookup_table);
}
}
@@ -253,7 +253,8 @@ void ConvertYUVAToARGB_C(const uint8* yplane,
int astride,
int rgbastride,
YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
+ const int16* lookup_table = GetLookupTable(yuv_type);
for (int y = 0; y < height; y++) {
uint8* rgba_row = rgbaframe + y * rgbastride;
const uint8* y_ptr = yplane + y * ystride;
@@ -267,7 +268,7 @@ void ConvertYUVAToARGB_C(const uint8* yplane,
a_ptr,
rgba_row,
width,
- GetLookupTable(yuv_type));
+ lookup_table);
}
}
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
index 44b123fcdd1..890c2929095 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
@@ -19,6 +19,6 @@
; const uint8* v_buf,
; uint8* rgb_buf,
; ptrdiff_t width);
-; const int16 convert_table[1024][4]);
+; const int16* convert_table);
%define SYMBOL ConvertYUVToRGB32Row_SSE
%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
index 2e6f5e30ab0..043170a6140 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
@@ -9,7 +9,6 @@
#endif
#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
#include "media/base/yuv_convert.h"
namespace media {
diff --git a/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
index 395f326abbf..5c263d5dd5a 100644
--- a/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
+++ b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
@@ -19,6 +19,6 @@
; const uint8* a_buf,
; uint8* rgb_buf,
; ptrdiff_t width);
-; const int16 convert_table[1024][4]);
+; const int16* convert_table);
%define SYMBOL ConvertYUVAToARGBRow_MMX
%include "convert_yuva_to_argb_mmx.inc"
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
index bf2f7080520..a85a932a0e6 100644
--- a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
@@ -19,6 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
; ptrdiff_t source_dx);
-; const int16 convert_table[1024][4]);
+; const int16* convert_table);
%define SYMBOL LinearScaleYUVToRGB32Row_MMX
%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
index 89e4e2a6825..03213f2686d 100644
--- a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
@@ -23,7 +23,6 @@
mangle(SYMBOL):
%assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
; Parameters are in the following order:
; 1. Y plane
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
index 122365149fc..d6b82f23506 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
@@ -19,6 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
; ptrdiff_t source_dx);
-; const int16 convert_table[1024][4]);
+; const int16* convert_table);
%define SYMBOL ScaleYUVToRGB32Row_MMX
%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
index 60351db557d..42782cbae12 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
@@ -10,8 +10,6 @@
mangle(SYMBOL):
%assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
-
; Parameters are in the following order:
; 1. Y plane
; 2. U plane
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
index fc98bbe6bda..06ba6c24026 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
@@ -19,6 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
; ptrdiff_t source_dx);
-; const int16 convert_table[1024][4]);
+; const int16* convert_table);
%define SYMBOL ScaleYUVToRGB32Row_SSE
%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
index cf0d140dbfd..8f7345ca9ec 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
@@ -23,7 +23,6 @@
mangle(SYMBOL):
%assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
; Parameters are in the following order:
; 1. Y plane
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.cc b/chromium/media/base/simd/yuv_to_rgb_table.cc
deleted file mode 100644
index 5bc35aff89a..00000000000
--- a/chromium/media/base/simd/yuv_to_rgb_table.cc
+++ /dev/null
@@ -1,669 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/yuv_to_rgb_table.h"
-
-extern "C" {
-
-// Defines the R,G,B,A contributions from Y.
-#define RGBY(i) { \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- 0 \
-}
-
-// Defines the R,G,B,A contributions from U.
-// The contribution to A is the same for any value of U
-// causing the final A value to be 255 in every conversion.
-// Android's pixel layout is RGBA, while other platforms
-// are BGRA.
-#if defined(OS_ANDROID)
-#define RGBU(i) { \
- 0, \
- static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(256 * 64 - 1) \
-}
-#else
-#define RGBU(i) { \
- static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
- 0, \
- static_cast<int16>(256 * 64 - 1) \
-}
-#endif
-
-// Defines the R,G,B,A contributions from V.
-// Android's pixel layout is RGBA, while other platforms
-// are BGRA.
-#if defined(OS_ANDROID)
-#define RGBV(i) { \
- static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
- 0, \
- 0 \
-}
-#else
-#define RGBV(i) { \
- 0, \
- static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
- 0 \
-}
-#endif
-
-// Used to define a set of multiplier words for each alpha level.
-#define ALPHA(i) { \
- i, i, i, i \
-}
-
-// The following table defines the RGBA contributions
-// for each component of YUVA. The Y table is first followed
-// by the U, and V tables. The alpha multiplier table follows.
-// These tables are aligned and kept adjacent to optimize for
-// SIMD and cacheing.
-
-SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]) = {
- RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
- RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
- RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
- RGBY(0x0C), RGBY(0x0D), RGBY(0x0E), RGBY(0x0F),
- RGBY(0x10), RGBY(0x11), RGBY(0x12), RGBY(0x13),
- RGBY(0x14), RGBY(0x15), RGBY(0x16), RGBY(0x17),
- RGBY(0x18), RGBY(0x19), RGBY(0x1A), RGBY(0x1B),
- RGBY(0x1C), RGBY(0x1D), RGBY(0x1E), RGBY(0x1F),
- RGBY(0x20), RGBY(0x21), RGBY(0x22), RGBY(0x23),
- RGBY(0x24), RGBY(0x25), RGBY(0x26), RGBY(0x27),
- RGBY(0x28), RGBY(0x29), RGBY(0x2A), RGBY(0x2B),
- RGBY(0x2C), RGBY(0x2D), RGBY(0x2E), RGBY(0x2F),
- RGBY(0x30), RGBY(0x31), RGBY(0x32), RGBY(0x33),
- RGBY(0x34), RGBY(0x35), RGBY(0x36), RGBY(0x37),
- RGBY(0x38), RGBY(0x39), RGBY(0x3A), RGBY(0x3B),
- RGBY(0x3C), RGBY(0x3D), RGBY(0x3E), RGBY(0x3F),
- RGBY(0x40), RGBY(0x41), RGBY(0x42), RGBY(0x43),
- RGBY(0x44), RGBY(0x45), RGBY(0x46), RGBY(0x47),
- RGBY(0x48), RGBY(0x49), RGBY(0x4A), RGBY(0x4B),
- RGBY(0x4C), RGBY(0x4D), RGBY(0x4E), RGBY(0x4F),
- RGBY(0x50), RGBY(0x51), RGBY(0x52), RGBY(0x53),
- RGBY(0x54), RGBY(0x55), RGBY(0x56), RGBY(0x57),
- RGBY(0x58), RGBY(0x59), RGBY(0x5A), RGBY(0x5B),
- RGBY(0x5C), RGBY(0x5D), RGBY(0x5E), RGBY(0x5F),
- RGBY(0x60), RGBY(0x61), RGBY(0x62), RGBY(0x63),
- RGBY(0x64), RGBY(0x65), RGBY(0x66), RGBY(0x67),
- RGBY(0x68), RGBY(0x69), RGBY(0x6A), RGBY(0x6B),
- RGBY(0x6C), RGBY(0x6D), RGBY(0x6E), RGBY(0x6F),
- RGBY(0x70), RGBY(0x71), RGBY(0x72), RGBY(0x73),
- RGBY(0x74), RGBY(0x75), RGBY(0x76), RGBY(0x77),
- RGBY(0x78), RGBY(0x79), RGBY(0x7A), RGBY(0x7B),
- RGBY(0x7C), RGBY(0x7D), RGBY(0x7E), RGBY(0x7F),
- RGBY(0x80), RGBY(0x81), RGBY(0x82), RGBY(0x83),
- RGBY(0x84), RGBY(0x85), RGBY(0x86), RGBY(0x87),
- RGBY(0x88), RGBY(0x89), RGBY(0x8A), RGBY(0x8B),
- RGBY(0x8C), RGBY(0x8D), RGBY(0x8E), RGBY(0x8F),
- RGBY(0x90), RGBY(0x91), RGBY(0x92), RGBY(0x93),
- RGBY(0x94), RGBY(0x95), RGBY(0x96), RGBY(0x97),
- RGBY(0x98), RGBY(0x99), RGBY(0x9A), RGBY(0x9B),
- RGBY(0x9C), RGBY(0x9D), RGBY(0x9E), RGBY(0x9F),
- RGBY(0xA0), RGBY(0xA1), RGBY(0xA2), RGBY(0xA3),
- RGBY(0xA4), RGBY(0xA5), RGBY(0xA6), RGBY(0xA7),
- RGBY(0xA8), RGBY(0xA9), RGBY(0xAA), RGBY(0xAB),
- RGBY(0xAC), RGBY(0xAD), RGBY(0xAE), RGBY(0xAF),
- RGBY(0xB0), RGBY(0xB1), RGBY(0xB2), RGBY(0xB3),
- RGBY(0xB4), RGBY(0xB5), RGBY(0xB6), RGBY(0xB7),
- RGBY(0xB8), RGBY(0xB9), RGBY(0xBA), RGBY(0xBB),
- RGBY(0xBC), RGBY(0xBD), RGBY(0xBE), RGBY(0xBF),
- RGBY(0xC0), RGBY(0xC1), RGBY(0xC2), RGBY(0xC3),
- RGBY(0xC4), RGBY(0xC5), RGBY(0xC6), RGBY(0xC7),
- RGBY(0xC8), RGBY(0xC9), RGBY(0xCA), RGBY(0xCB),
- RGBY(0xCC), RGBY(0xCD), RGBY(0xCE), RGBY(0xCF),
- RGBY(0xD0), RGBY(0xD1), RGBY(0xD2), RGBY(0xD3),
- RGBY(0xD4), RGBY(0xD5), RGBY(0xD6), RGBY(0xD7),
- RGBY(0xD8), RGBY(0xD9), RGBY(0xDA), RGBY(0xDB),
- RGBY(0xDC), RGBY(0xDD), RGBY(0xDE), RGBY(0xDF),
- RGBY(0xE0), RGBY(0xE1), RGBY(0xE2), RGBY(0xE3),
- RGBY(0xE4), RGBY(0xE5), RGBY(0xE6), RGBY(0xE7),
- RGBY(0xE8), RGBY(0xE9), RGBY(0xEA), RGBY(0xEB),
- RGBY(0xEC), RGBY(0xED), RGBY(0xEE), RGBY(0xEF),
- RGBY(0xF0), RGBY(0xF1), RGBY(0xF2), RGBY(0xF3),
- RGBY(0xF4), RGBY(0xF5), RGBY(0xF6), RGBY(0xF7),
- RGBY(0xF8), RGBY(0xF9), RGBY(0xFA), RGBY(0xFB),
- RGBY(0xFC), RGBY(0xFD), RGBY(0xFE), RGBY(0xFF),
-
- // Chroma U table.
- RGBU(0x00), RGBU(0x01), RGBU(0x02), RGBU(0x03),
- RGBU(0x04), RGBU(0x05), RGBU(0x06), RGBU(0x07),
- RGBU(0x08), RGBU(0x09), RGBU(0x0A), RGBU(0x0B),
- RGBU(0x0C), RGBU(0x0D), RGBU(0x0E), RGBU(0x0F),
- RGBU(0x10), RGBU(0x11), RGBU(0x12), RGBU(0x13),
- RGBU(0x14), RGBU(0x15), RGBU(0x16), RGBU(0x17),
- RGBU(0x18), RGBU(0x19), RGBU(0x1A), RGBU(0x1B),
- RGBU(0x1C), RGBU(0x1D), RGBU(0x1E), RGBU(0x1F),
- RGBU(0x20), RGBU(0x21), RGBU(0x22), RGBU(0x23),
- RGBU(0x24), RGBU(0x25), RGBU(0x26), RGBU(0x27),
- RGBU(0x28), RGBU(0x29), RGBU(0x2A), RGBU(0x2B),
- RGBU(0x2C), RGBU(0x2D), RGBU(0x2E), RGBU(0x2F),
- RGBU(0x30), RGBU(0x31), RGBU(0x32), RGBU(0x33),
- RGBU(0x34), RGBU(0x35), RGBU(0x36), RGBU(0x37),
- RGBU(0x38), RGBU(0x39), RGBU(0x3A), RGBU(0x3B),
- RGBU(0x3C), RGBU(0x3D), RGBU(0x3E), RGBU(0x3F),
- RGBU(0x40), RGBU(0x41), RGBU(0x42), RGBU(0x43),
- RGBU(0x44), RGBU(0x45), RGBU(0x46), RGBU(0x47),
- RGBU(0x48), RGBU(0x49), RGBU(0x4A), RGBU(0x4B),
- RGBU(0x4C), RGBU(0x4D), RGBU(0x4E), RGBU(0x4F),
- RGBU(0x50), RGBU(0x51), RGBU(0x52), RGBU(0x53),
- RGBU(0x54), RGBU(0x55), RGBU(0x56), RGBU(0x57),
- RGBU(0x58), RGBU(0x59), RGBU(0x5A), RGBU(0x5B),
- RGBU(0x5C), RGBU(0x5D), RGBU(0x5E), RGBU(0x5F),
- RGBU(0x60), RGBU(0x61), RGBU(0x62), RGBU(0x63),
- RGBU(0x64), RGBU(0x65), RGBU(0x66), RGBU(0x67),
- RGBU(0x68), RGBU(0x69), RGBU(0x6A), RGBU(0x6B),
- RGBU(0x6C), RGBU(0x6D), RGBU(0x6E), RGBU(0x6F),
- RGBU(0x70), RGBU(0x71), RGBU(0x72), RGBU(0x73),
- RGBU(0x74), RGBU(0x75), RGBU(0x76), RGBU(0x77),
- RGBU(0x78), RGBU(0x79), RGBU(0x7A), RGBU(0x7B),
- RGBU(0x7C), RGBU(0x7D), RGBU(0x7E), RGBU(0x7F),
- RGBU(0x80), RGBU(0x81), RGBU(0x82), RGBU(0x83),
- RGBU(0x84), RGBU(0x85), RGBU(0x86), RGBU(0x87),
- RGBU(0x88), RGBU(0x89), RGBU(0x8A), RGBU(0x8B),
- RGBU(0x8C), RGBU(0x8D), RGBU(0x8E), RGBU(0x8F),
- RGBU(0x90), RGBU(0x91), RGBU(0x92), RGBU(0x93),
- RGBU(0x94), RGBU(0x95), RGBU(0x96), RGBU(0x97),
- RGBU(0x98), RGBU(0x99), RGBU(0x9A), RGBU(0x9B),
- RGBU(0x9C), RGBU(0x9D), RGBU(0x9E), RGBU(0x9F),
- RGBU(0xA0), RGBU(0xA1), RGBU(0xA2), RGBU(0xA3),
- RGBU(0xA4), RGBU(0xA5), RGBU(0xA6), RGBU(0xA7),
- RGBU(0xA8), RGBU(0xA9), RGBU(0xAA), RGBU(0xAB),
- RGBU(0xAC), RGBU(0xAD), RGBU(0xAE), RGBU(0xAF),
- RGBU(0xB0), RGBU(0xB1), RGBU(0xB2), RGBU(0xB3),
- RGBU(0xB4), RGBU(0xB5), RGBU(0xB6), RGBU(0xB7),
- RGBU(0xB8), RGBU(0xB9), RGBU(0xBA), RGBU(0xBB),
- RGBU(0xBC), RGBU(0xBD), RGBU(0xBE), RGBU(0xBF),
- RGBU(0xC0), RGBU(0xC1), RGBU(0xC2), RGBU(0xC3),
- RGBU(0xC4), RGBU(0xC5), RGBU(0xC6), RGBU(0xC7),
- RGBU(0xC8), RGBU(0xC9), RGBU(0xCA), RGBU(0xCB),
- RGBU(0xCC), RGBU(0xCD), RGBU(0xCE), RGBU(0xCF),
- RGBU(0xD0), RGBU(0xD1), RGBU(0xD2), RGBU(0xD3),
- RGBU(0xD4), RGBU(0xD5), RGBU(0xD6), RGBU(0xD7),
- RGBU(0xD8), RGBU(0xD9), RGBU(0xDA), RGBU(0xDB),
- RGBU(0xDC), RGBU(0xDD), RGBU(0xDE), RGBU(0xDF),
- RGBU(0xE0), RGBU(0xE1), RGBU(0xE2), RGBU(0xE3),
- RGBU(0xE4), RGBU(0xE5), RGBU(0xE6), RGBU(0xE7),
- RGBU(0xE8), RGBU(0xE9), RGBU(0xEA), RGBU(0xEB),
- RGBU(0xEC), RGBU(0xED), RGBU(0xEE), RGBU(0xEF),
- RGBU(0xF0), RGBU(0xF1), RGBU(0xF2), RGBU(0xF3),
- RGBU(0xF4), RGBU(0xF5), RGBU(0xF6), RGBU(0xF7),
- RGBU(0xF8), RGBU(0xF9), RGBU(0xFA), RGBU(0xFB),
- RGBU(0xFC), RGBU(0xFD), RGBU(0xFE), RGBU(0xFF),
-
- // Chroma V table.
- RGBV(0x00), RGBV(0x01), RGBV(0x02), RGBV(0x03),
- RGBV(0x04), RGBV(0x05), RGBV(0x06), RGBV(0x07),
- RGBV(0x08), RGBV(0x09), RGBV(0x0A), RGBV(0x0B),
- RGBV(0x0C), RGBV(0x0D), RGBV(0x0E), RGBV(0x0F),
- RGBV(0x10), RGBV(0x11), RGBV(0x12), RGBV(0x13),
- RGBV(0x14), RGBV(0x15), RGBV(0x16), RGBV(0x17),
- RGBV(0x18), RGBV(0x19), RGBV(0x1A), RGBV(0x1B),
- RGBV(0x1C), RGBV(0x1D), RGBV(0x1E), RGBV(0x1F),
- RGBV(0x20), RGBV(0x21), RGBV(0x22), RGBV(0x23),
- RGBV(0x24), RGBV(0x25), RGBV(0x26), RGBV(0x27),
- RGBV(0x28), RGBV(0x29), RGBV(0x2A), RGBV(0x2B),
- RGBV(0x2C), RGBV(0x2D), RGBV(0x2E), RGBV(0x2F),
- RGBV(0x30), RGBV(0x31), RGBV(0x32), RGBV(0x33),
- RGBV(0x34), RGBV(0x35), RGBV(0x36), RGBV(0x37),
- RGBV(0x38), RGBV(0x39), RGBV(0x3A), RGBV(0x3B),
- RGBV(0x3C), RGBV(0x3D), RGBV(0x3E), RGBV(0x3F),
- RGBV(0x40), RGBV(0x41), RGBV(0x42), RGBV(0x43),
- RGBV(0x44), RGBV(0x45), RGBV(0x46), RGBV(0x47),
- RGBV(0x48), RGBV(0x49), RGBV(0x4A), RGBV(0x4B),
- RGBV(0x4C), RGBV(0x4D), RGBV(0x4E), RGBV(0x4F),
- RGBV(0x50), RGBV(0x51), RGBV(0x52), RGBV(0x53),
- RGBV(0x54), RGBV(0x55), RGBV(0x56), RGBV(0x57),
- RGBV(0x58), RGBV(0x59), RGBV(0x5A), RGBV(0x5B),
- RGBV(0x5C), RGBV(0x5D), RGBV(0x5E), RGBV(0x5F),
- RGBV(0x60), RGBV(0x61), RGBV(0x62), RGBV(0x63),
- RGBV(0x64), RGBV(0x65), RGBV(0x66), RGBV(0x67),
- RGBV(0x68), RGBV(0x69), RGBV(0x6A), RGBV(0x6B),
- RGBV(0x6C), RGBV(0x6D), RGBV(0x6E), RGBV(0x6F),
- RGBV(0x70), RGBV(0x71), RGBV(0x72), RGBV(0x73),
- RGBV(0x74), RGBV(0x75), RGBV(0x76), RGBV(0x77),
- RGBV(0x78), RGBV(0x79), RGBV(0x7A), RGBV(0x7B),
- RGBV(0x7C), RGBV(0x7D), RGBV(0x7E), RGBV(0x7F),
- RGBV(0x80), RGBV(0x81), RGBV(0x82), RGBV(0x83),
- RGBV(0x84), RGBV(0x85), RGBV(0x86), RGBV(0x87),
- RGBV(0x88), RGBV(0x89), RGBV(0x8A), RGBV(0x8B),
- RGBV(0x8C), RGBV(0x8D), RGBV(0x8E), RGBV(0x8F),
- RGBV(0x90), RGBV(0x91), RGBV(0x92), RGBV(0x93),
- RGBV(0x94), RGBV(0x95), RGBV(0x96), RGBV(0x97),
- RGBV(0x98), RGBV(0x99), RGBV(0x9A), RGBV(0x9B),
- RGBV(0x9C), RGBV(0x9D), RGBV(0x9E), RGBV(0x9F),
- RGBV(0xA0), RGBV(0xA1), RGBV(0xA2), RGBV(0xA3),
- RGBV(0xA4), RGBV(0xA5), RGBV(0xA6), RGBV(0xA7),
- RGBV(0xA8), RGBV(0xA9), RGBV(0xAA), RGBV(0xAB),
- RGBV(0xAC), RGBV(0xAD), RGBV(0xAE), RGBV(0xAF),
- RGBV(0xB0), RGBV(0xB1), RGBV(0xB2), RGBV(0xB3),
- RGBV(0xB4), RGBV(0xB5), RGBV(0xB6), RGBV(0xB7),
- RGBV(0xB8), RGBV(0xB9), RGBV(0xBA), RGBV(0xBB),
- RGBV(0xBC), RGBV(0xBD), RGBV(0xBE), RGBV(0xBF),
- RGBV(0xC0), RGBV(0xC1), RGBV(0xC2), RGBV(0xC3),
- RGBV(0xC4), RGBV(0xC5), RGBV(0xC6), RGBV(0xC7),
- RGBV(0xC8), RGBV(0xC9), RGBV(0xCA), RGBV(0xCB),
- RGBV(0xCC), RGBV(0xCD), RGBV(0xCE), RGBV(0xCF),
- RGBV(0xD0), RGBV(0xD1), RGBV(0xD2), RGBV(0xD3),
- RGBV(0xD4), RGBV(0xD5), RGBV(0xD6), RGBV(0xD7),
- RGBV(0xD8), RGBV(0xD9), RGBV(0xDA), RGBV(0xDB),
- RGBV(0xDC), RGBV(0xDD), RGBV(0xDE), RGBV(0xDF),
- RGBV(0xE0), RGBV(0xE1), RGBV(0xE2), RGBV(0xE3),
- RGBV(0xE4), RGBV(0xE5), RGBV(0xE6), RGBV(0xE7),
- RGBV(0xE8), RGBV(0xE9), RGBV(0xEA), RGBV(0xEB),
- RGBV(0xEC), RGBV(0xED), RGBV(0xEE), RGBV(0xEF),
- RGBV(0xF0), RGBV(0xF1), RGBV(0xF2), RGBV(0xF3),
- RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
- RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
- RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
-
- // Alpha multipliers for each alpha level.
- ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
- ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
- ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
- ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
- ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
- ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
- ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
- ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
- ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
- ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
- ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
- ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
- ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
- ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
- ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
- ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
- ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
- ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
- ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
- ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
- ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
- ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
- ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
- ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
- ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
- ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
- ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
- ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
- ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
- ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
- ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
- ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
- ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
- ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
- ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
- ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
- ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
- ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
- ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
- ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
- ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
- ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
- ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
- ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
- ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
- ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
- ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
- ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
- ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
- ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
- ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
- ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
- ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
- ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
- ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
- ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
- ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
- ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
- ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
- ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
- ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
- ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
- ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
- ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
-};
-
-#undef RGBY
-#undef RGBU
-#undef RGBV
-#undef ALPHA
-
-// JPEG color range version:
-
-// Defines the R,G,B,A contributions from Y.
-#define RGBY(i) { \
- static_cast<int16>(64 * i + 0.5), \
- static_cast<int16>(64 * i + 0.5), \
- static_cast<int16>(64 * i + 0.5), \
- 0 \
-}
-
-// Defines the R,G,B,A contributions from U.
-// The contribution to A is the same for any value of U
-// causing the final A value to be 255 in every conversion.
-// Android's pixel layout is RGBA, while other platforms
-// are BGRA.
-#if defined(OS_ANDROID)
-#define RGBU(i) { \
- 0, \
- static_cast<int16>(-0.34414 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(1.772 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(256 * 64 - 1) \
-}
-#else
-#define RGBU(i) { \
- static_cast<int16>(1.772 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(-0.34414 * 64 * (i - 128) + 0.5), \
- 0, \
- static_cast<int16>(256 * 64 - 1) \
-}
-#endif
-
-// Defines the R,G,B,A contributions from V.
-// Android's pixel layout is RGBA, while other platforms
-// are BGRA.
-#if defined(OS_ANDROID)
-#define RGBV(i) { \
- static_cast<int16>(1.402 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(-0.71414 * 64 * (i - 128) + 0.5), \
- 0, \
- 0 \
-}
-#else
-#define RGBV(i) { \
- 0, \
- static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(1.402 * 64 * (i - 128) + 0.5), \
- 0 \
-}
-#endif
-
-// Used to define a set of multiplier words for each alpha level.
-#define ALPHA(i) { \
- i, i, i, i \
-}
-
-// The following table defines the RGBA contributions
-// for each component of YUVA. The Y table is first followed
-// by the U, and V tables. The alpha multiplier table follows.
-// These tables are aligned and kept adjacent to optimize for
-// SIMD and caching.
-
-SIMD_ALIGNED(const int16 kCoefficientsRgbY_JPEG[256 * 4][4]) = {
- RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
- RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
- RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
- RGBY(0x0C), RGBY(0x0D), RGBY(0x0E), RGBY(0x0F),
- RGBY(0x10), RGBY(0x11), RGBY(0x12), RGBY(0x13),
- RGBY(0x14), RGBY(0x15), RGBY(0x16), RGBY(0x17),
- RGBY(0x18), RGBY(0x19), RGBY(0x1A), RGBY(0x1B),
- RGBY(0x1C), RGBY(0x1D), RGBY(0x1E), RGBY(0x1F),
- RGBY(0x20), RGBY(0x21), RGBY(0x22), RGBY(0x23),
- RGBY(0x24), RGBY(0x25), RGBY(0x26), RGBY(0x27),
- RGBY(0x28), RGBY(0x29), RGBY(0x2A), RGBY(0x2B),
- RGBY(0x2C), RGBY(0x2D), RGBY(0x2E), RGBY(0x2F),
- RGBY(0x30), RGBY(0x31), RGBY(0x32), RGBY(0x33),
- RGBY(0x34), RGBY(0x35), RGBY(0x36), RGBY(0x37),
- RGBY(0x38), RGBY(0x39), RGBY(0x3A), RGBY(0x3B),
- RGBY(0x3C), RGBY(0x3D), RGBY(0x3E), RGBY(0x3F),
- RGBY(0x40), RGBY(0x41), RGBY(0x42), RGBY(0x43),
- RGBY(0x44), RGBY(0x45), RGBY(0x46), RGBY(0x47),
- RGBY(0x48), RGBY(0x49), RGBY(0x4A), RGBY(0x4B),
- RGBY(0x4C), RGBY(0x4D), RGBY(0x4E), RGBY(0x4F),
- RGBY(0x50), RGBY(0x51), RGBY(0x52), RGBY(0x53),
- RGBY(0x54), RGBY(0x55), RGBY(0x56), RGBY(0x57),
- RGBY(0x58), RGBY(0x59), RGBY(0x5A), RGBY(0x5B),
- RGBY(0x5C), RGBY(0x5D), RGBY(0x5E), RGBY(0x5F),
- RGBY(0x60), RGBY(0x61), RGBY(0x62), RGBY(0x63),
- RGBY(0x64), RGBY(0x65), RGBY(0x66), RGBY(0x67),
- RGBY(0x68), RGBY(0x69), RGBY(0x6A), RGBY(0x6B),
- RGBY(0x6C), RGBY(0x6D), RGBY(0x6E), RGBY(0x6F),
- RGBY(0x70), RGBY(0x71), RGBY(0x72), RGBY(0x73),
- RGBY(0x74), RGBY(0x75), RGBY(0x76), RGBY(0x77),
- RGBY(0x78), RGBY(0x79), RGBY(0x7A), RGBY(0x7B),
- RGBY(0x7C), RGBY(0x7D), RGBY(0x7E), RGBY(0x7F),
- RGBY(0x80), RGBY(0x81), RGBY(0x82), RGBY(0x83),
- RGBY(0x84), RGBY(0x85), RGBY(0x86), RGBY(0x87),
- RGBY(0x88), RGBY(0x89), RGBY(0x8A), RGBY(0x8B),
- RGBY(0x8C), RGBY(0x8D), RGBY(0x8E), RGBY(0x8F),
- RGBY(0x90), RGBY(0x91), RGBY(0x92), RGBY(0x93),
- RGBY(0x94), RGBY(0x95), RGBY(0x96), RGBY(0x97),
- RGBY(0x98), RGBY(0x99), RGBY(0x9A), RGBY(0x9B),
- RGBY(0x9C), RGBY(0x9D), RGBY(0x9E), RGBY(0x9F),
- RGBY(0xA0), RGBY(0xA1), RGBY(0xA2), RGBY(0xA3),
- RGBY(0xA4), RGBY(0xA5), RGBY(0xA6), RGBY(0xA7),
- RGBY(0xA8), RGBY(0xA9), RGBY(0xAA), RGBY(0xAB),
- RGBY(0xAC), RGBY(0xAD), RGBY(0xAE), RGBY(0xAF),
- RGBY(0xB0), RGBY(0xB1), RGBY(0xB2), RGBY(0xB3),
- RGBY(0xB4), RGBY(0xB5), RGBY(0xB6), RGBY(0xB7),
- RGBY(0xB8), RGBY(0xB9), RGBY(0xBA), RGBY(0xBB),
- RGBY(0xBC), RGBY(0xBD), RGBY(0xBE), RGBY(0xBF),
- RGBY(0xC0), RGBY(0xC1), RGBY(0xC2), RGBY(0xC3),
- RGBY(0xC4), RGBY(0xC5), RGBY(0xC6), RGBY(0xC7),
- RGBY(0xC8), RGBY(0xC9), RGBY(0xCA), RGBY(0xCB),
- RGBY(0xCC), RGBY(0xCD), RGBY(0xCE), RGBY(0xCF),
- RGBY(0xD0), RGBY(0xD1), RGBY(0xD2), RGBY(0xD3),
- RGBY(0xD4), RGBY(0xD5), RGBY(0xD6), RGBY(0xD7),
- RGBY(0xD8), RGBY(0xD9), RGBY(0xDA), RGBY(0xDB),
- RGBY(0xDC), RGBY(0xDD), RGBY(0xDE), RGBY(0xDF),
- RGBY(0xE0), RGBY(0xE1), RGBY(0xE2), RGBY(0xE3),
- RGBY(0xE4), RGBY(0xE5), RGBY(0xE6), RGBY(0xE7),
- RGBY(0xE8), RGBY(0xE9), RGBY(0xEA), RGBY(0xEB),
- RGBY(0xEC), RGBY(0xED), RGBY(0xEE), RGBY(0xEF),
- RGBY(0xF0), RGBY(0xF1), RGBY(0xF2), RGBY(0xF3),
- RGBY(0xF4), RGBY(0xF5), RGBY(0xF6), RGBY(0xF7),
- RGBY(0xF8), RGBY(0xF9), RGBY(0xFA), RGBY(0xFB),
- RGBY(0xFC), RGBY(0xFD), RGBY(0xFE), RGBY(0xFF),
-
- // Chroma U table.
- RGBU(0x00), RGBU(0x01), RGBU(0x02), RGBU(0x03),
- RGBU(0x04), RGBU(0x05), RGBU(0x06), RGBU(0x07),
- RGBU(0x08), RGBU(0x09), RGBU(0x0A), RGBU(0x0B),
- RGBU(0x0C), RGBU(0x0D), RGBU(0x0E), RGBU(0x0F),
- RGBU(0x10), RGBU(0x11), RGBU(0x12), RGBU(0x13),
- RGBU(0x14), RGBU(0x15), RGBU(0x16), RGBU(0x17),
- RGBU(0x18), RGBU(0x19), RGBU(0x1A), RGBU(0x1B),
- RGBU(0x1C), RGBU(0x1D), RGBU(0x1E), RGBU(0x1F),
- RGBU(0x20), RGBU(0x21), RGBU(0x22), RGBU(0x23),
- RGBU(0x24), RGBU(0x25), RGBU(0x26), RGBU(0x27),
- RGBU(0x28), RGBU(0x29), RGBU(0x2A), RGBU(0x2B),
- RGBU(0x2C), RGBU(0x2D), RGBU(0x2E), RGBU(0x2F),
- RGBU(0x30), RGBU(0x31), RGBU(0x32), RGBU(0x33),
- RGBU(0x34), RGBU(0x35), RGBU(0x36), RGBU(0x37),
- RGBU(0x38), RGBU(0x39), RGBU(0x3A), RGBU(0x3B),
- RGBU(0x3C), RGBU(0x3D), RGBU(0x3E), RGBU(0x3F),
- RGBU(0x40), RGBU(0x41), RGBU(0x42), RGBU(0x43),
- RGBU(0x44), RGBU(0x45), RGBU(0x46), RGBU(0x47),
- RGBU(0x48), RGBU(0x49), RGBU(0x4A), RGBU(0x4B),
- RGBU(0x4C), RGBU(0x4D), RGBU(0x4E), RGBU(0x4F),
- RGBU(0x50), RGBU(0x51), RGBU(0x52), RGBU(0x53),
- RGBU(0x54), RGBU(0x55), RGBU(0x56), RGBU(0x57),
- RGBU(0x58), RGBU(0x59), RGBU(0x5A), RGBU(0x5B),
- RGBU(0x5C), RGBU(0x5D), RGBU(0x5E), RGBU(0x5F),
- RGBU(0x60), RGBU(0x61), RGBU(0x62), RGBU(0x63),
- RGBU(0x64), RGBU(0x65), RGBU(0x66), RGBU(0x67),
- RGBU(0x68), RGBU(0x69), RGBU(0x6A), RGBU(0x6B),
- RGBU(0x6C), RGBU(0x6D), RGBU(0x6E), RGBU(0x6F),
- RGBU(0x70), RGBU(0x71), RGBU(0x72), RGBU(0x73),
- RGBU(0x74), RGBU(0x75), RGBU(0x76), RGBU(0x77),
- RGBU(0x78), RGBU(0x79), RGBU(0x7A), RGBU(0x7B),
- RGBU(0x7C), RGBU(0x7D), RGBU(0x7E), RGBU(0x7F),
- RGBU(0x80), RGBU(0x81), RGBU(0x82), RGBU(0x83),
- RGBU(0x84), RGBU(0x85), RGBU(0x86), RGBU(0x87),
- RGBU(0x88), RGBU(0x89), RGBU(0x8A), RGBU(0x8B),
- RGBU(0x8C), RGBU(0x8D), RGBU(0x8E), RGBU(0x8F),
- RGBU(0x90), RGBU(0x91), RGBU(0x92), RGBU(0x93),
- RGBU(0x94), RGBU(0x95), RGBU(0x96), RGBU(0x97),
- RGBU(0x98), RGBU(0x99), RGBU(0x9A), RGBU(0x9B),
- RGBU(0x9C), RGBU(0x9D), RGBU(0x9E), RGBU(0x9F),
- RGBU(0xA0), RGBU(0xA1), RGBU(0xA2), RGBU(0xA3),
- RGBU(0xA4), RGBU(0xA5), RGBU(0xA6), RGBU(0xA7),
- RGBU(0xA8), RGBU(0xA9), RGBU(0xAA), RGBU(0xAB),
- RGBU(0xAC), RGBU(0xAD), RGBU(0xAE), RGBU(0xAF),
- RGBU(0xB0), RGBU(0xB1), RGBU(0xB2), RGBU(0xB3),
- RGBU(0xB4), RGBU(0xB5), RGBU(0xB6), RGBU(0xB7),
- RGBU(0xB8), RGBU(0xB9), RGBU(0xBA), RGBU(0xBB),
- RGBU(0xBC), RGBU(0xBD), RGBU(0xBE), RGBU(0xBF),
- RGBU(0xC0), RGBU(0xC1), RGBU(0xC2), RGBU(0xC3),
- RGBU(0xC4), RGBU(0xC5), RGBU(0xC6), RGBU(0xC7),
- RGBU(0xC8), RGBU(0xC9), RGBU(0xCA), RGBU(0xCB),
- RGBU(0xCC), RGBU(0xCD), RGBU(0xCE), RGBU(0xCF),
- RGBU(0xD0), RGBU(0xD1), RGBU(0xD2), RGBU(0xD3),
- RGBU(0xD4), RGBU(0xD5), RGBU(0xD6), RGBU(0xD7),
- RGBU(0xD8), RGBU(0xD9), RGBU(0xDA), RGBU(0xDB),
- RGBU(0xDC), RGBU(0xDD), RGBU(0xDE), RGBU(0xDF),
- RGBU(0xE0), RGBU(0xE1), RGBU(0xE2), RGBU(0xE3),
- RGBU(0xE4), RGBU(0xE5), RGBU(0xE6), RGBU(0xE7),
- RGBU(0xE8), RGBU(0xE9), RGBU(0xEA), RGBU(0xEB),
- RGBU(0xEC), RGBU(0xED), RGBU(0xEE), RGBU(0xEF),
- RGBU(0xF0), RGBU(0xF1), RGBU(0xF2), RGBU(0xF3),
- RGBU(0xF4), RGBU(0xF5), RGBU(0xF6), RGBU(0xF7),
- RGBU(0xF8), RGBU(0xF9), RGBU(0xFA), RGBU(0xFB),
- RGBU(0xFC), RGBU(0xFD), RGBU(0xFE), RGBU(0xFF),
-
- // Chroma V table.
- RGBV(0x00), RGBV(0x01), RGBV(0x02), RGBV(0x03),
- RGBV(0x04), RGBV(0x05), RGBV(0x06), RGBV(0x07),
- RGBV(0x08), RGBV(0x09), RGBV(0x0A), RGBV(0x0B),
- RGBV(0x0C), RGBV(0x0D), RGBV(0x0E), RGBV(0x0F),
- RGBV(0x10), RGBV(0x11), RGBV(0x12), RGBV(0x13),
- RGBV(0x14), RGBV(0x15), RGBV(0x16), RGBV(0x17),
- RGBV(0x18), RGBV(0x19), RGBV(0x1A), RGBV(0x1B),
- RGBV(0x1C), RGBV(0x1D), RGBV(0x1E), RGBV(0x1F),
- RGBV(0x20), RGBV(0x21), RGBV(0x22), RGBV(0x23),
- RGBV(0x24), RGBV(0x25), RGBV(0x26), RGBV(0x27),
- RGBV(0x28), RGBV(0x29), RGBV(0x2A), RGBV(0x2B),
- RGBV(0x2C), RGBV(0x2D), RGBV(0x2E), RGBV(0x2F),
- RGBV(0x30), RGBV(0x31), RGBV(0x32), RGBV(0x33),
- RGBV(0x34), RGBV(0x35), RGBV(0x36), RGBV(0x37),
- RGBV(0x38), RGBV(0x39), RGBV(0x3A), RGBV(0x3B),
- RGBV(0x3C), RGBV(0x3D), RGBV(0x3E), RGBV(0x3F),
- RGBV(0x40), RGBV(0x41), RGBV(0x42), RGBV(0x43),
- RGBV(0x44), RGBV(0x45), RGBV(0x46), RGBV(0x47),
- RGBV(0x48), RGBV(0x49), RGBV(0x4A), RGBV(0x4B),
- RGBV(0x4C), RGBV(0x4D), RGBV(0x4E), RGBV(0x4F),
- RGBV(0x50), RGBV(0x51), RGBV(0x52), RGBV(0x53),
- RGBV(0x54), RGBV(0x55), RGBV(0x56), RGBV(0x57),
- RGBV(0x58), RGBV(0x59), RGBV(0x5A), RGBV(0x5B),
- RGBV(0x5C), RGBV(0x5D), RGBV(0x5E), RGBV(0x5F),
- RGBV(0x60), RGBV(0x61), RGBV(0x62), RGBV(0x63),
- RGBV(0x64), RGBV(0x65), RGBV(0x66), RGBV(0x67),
- RGBV(0x68), RGBV(0x69), RGBV(0x6A), RGBV(0x6B),
- RGBV(0x6C), RGBV(0x6D), RGBV(0x6E), RGBV(0x6F),
- RGBV(0x70), RGBV(0x71), RGBV(0x72), RGBV(0x73),
- RGBV(0x74), RGBV(0x75), RGBV(0x76), RGBV(0x77),
- RGBV(0x78), RGBV(0x79), RGBV(0x7A), RGBV(0x7B),
- RGBV(0x7C), RGBV(0x7D), RGBV(0x7E), RGBV(0x7F),
- RGBV(0x80), RGBV(0x81), RGBV(0x82), RGBV(0x83),
- RGBV(0x84), RGBV(0x85), RGBV(0x86), RGBV(0x87),
- RGBV(0x88), RGBV(0x89), RGBV(0x8A), RGBV(0x8B),
- RGBV(0x8C), RGBV(0x8D), RGBV(0x8E), RGBV(0x8F),
- RGBV(0x90), RGBV(0x91), RGBV(0x92), RGBV(0x93),
- RGBV(0x94), RGBV(0x95), RGBV(0x96), RGBV(0x97),
- RGBV(0x98), RGBV(0x99), RGBV(0x9A), RGBV(0x9B),
- RGBV(0x9C), RGBV(0x9D), RGBV(0x9E), RGBV(0x9F),
- RGBV(0xA0), RGBV(0xA1), RGBV(0xA2), RGBV(0xA3),
- RGBV(0xA4), RGBV(0xA5), RGBV(0xA6), RGBV(0xA7),
- RGBV(0xA8), RGBV(0xA9), RGBV(0xAA), RGBV(0xAB),
- RGBV(0xAC), RGBV(0xAD), RGBV(0xAE), RGBV(0xAF),
- RGBV(0xB0), RGBV(0xB1), RGBV(0xB2), RGBV(0xB3),
- RGBV(0xB4), RGBV(0xB5), RGBV(0xB6), RGBV(0xB7),
- RGBV(0xB8), RGBV(0xB9), RGBV(0xBA), RGBV(0xBB),
- RGBV(0xBC), RGBV(0xBD), RGBV(0xBE), RGBV(0xBF),
- RGBV(0xC0), RGBV(0xC1), RGBV(0xC2), RGBV(0xC3),
- RGBV(0xC4), RGBV(0xC5), RGBV(0xC6), RGBV(0xC7),
- RGBV(0xC8), RGBV(0xC9), RGBV(0xCA), RGBV(0xCB),
- RGBV(0xCC), RGBV(0xCD), RGBV(0xCE), RGBV(0xCF),
- RGBV(0xD0), RGBV(0xD1), RGBV(0xD2), RGBV(0xD3),
- RGBV(0xD4), RGBV(0xD5), RGBV(0xD6), RGBV(0xD7),
- RGBV(0xD8), RGBV(0xD9), RGBV(0xDA), RGBV(0xDB),
- RGBV(0xDC), RGBV(0xDD), RGBV(0xDE), RGBV(0xDF),
- RGBV(0xE0), RGBV(0xE1), RGBV(0xE2), RGBV(0xE3),
- RGBV(0xE4), RGBV(0xE5), RGBV(0xE6), RGBV(0xE7),
- RGBV(0xE8), RGBV(0xE9), RGBV(0xEA), RGBV(0xEB),
- RGBV(0xEC), RGBV(0xED), RGBV(0xEE), RGBV(0xEF),
- RGBV(0xF0), RGBV(0xF1), RGBV(0xF2), RGBV(0xF3),
- RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
- RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
- RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
-
- // Alpha multipliers for each alpha level.
- ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
- ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
- ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
- ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
- ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
- ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
- ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
- ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
- ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
- ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
- ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
- ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
- ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
- ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
- ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
- ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
- ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
- ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
- ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
- ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
- ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
- ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
- ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
- ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
- ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
- ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
- ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
- ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
- ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
- ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
- ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
- ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
- ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
- ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
- ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
- ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
- ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
- ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
- ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
- ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
- ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
- ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
- ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
- ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
- ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
- ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
- ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
- ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
- ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
- ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
- ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
- ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
- ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
- ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
- ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
- ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
- ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
- ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
- ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
- ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
- ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
- ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
- ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
- ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
-};
-
-#undef RGBY
-#undef RGBU
-#undef RGBV
-#undef ALPHA
-
-} // extern "C"
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.h b/chromium/media/base/simd/yuv_to_rgb_table.h
deleted file mode 100644
index 1ed6fd86cae..00000000000
--- a/chromium/media/base/simd/yuv_to_rgb_table.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Defines convertion table from YUV to RGB.
-
-#ifndef MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
-#define MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
-
-#include "base/basictypes.h"
-#include "build/build_config.h"
-
-extern "C" {
-
-#if defined(COMPILER_MSVC)
-#define SIMD_ALIGNED(var) __declspec(align(16)) var
-#else
-#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
-#endif
-
-// Align the table to 16-bytes to allow faster reading.
-extern SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]);
-extern SIMD_ALIGNED(const int16 kCoefficientsRgbY_JPEG[256 * 4][4]);
-
-} // extern "C"
-
-#endif // MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
diff --git a/chromium/media/base/sinc_resampler.cc b/chromium/media/base/sinc_resampler.cc
index 280cd68eb19..fa2cf94b289 100644
--- a/chromium/media/base/sinc_resampler.cc
+++ b/chromium/media/base/sinc_resampler.cc
@@ -111,6 +111,10 @@ static double SincScaleFactor(double io_ratio) {
return sinc_scale_factor;
}
+static int CalculateChunkSize(int block_size_, double io_ratio) {
+ return block_size_ / io_ratio;
+}
+
SincResampler::SincResampler(double io_sample_rate_ratio,
int request_frames,
const ReadCB& read_cb)
@@ -153,6 +157,7 @@ void SincResampler::UpdateRegions(bool second_load) {
r3_ = r0_ + request_frames_ - kKernelSize;
r4_ = r0_ + request_frames_ - kKernelSize / 2;
block_size_ = r4_ - r2_;
+ chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);
// r1_ at the beginning of the buffer.
CHECK_EQ(r1_, input_buffer_.get());
@@ -205,6 +210,7 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
}
io_sample_rate_ratio_ = io_sample_rate_ratio;
+ chunk_size_ = CalculateChunkSize(block_size_, io_sample_rate_ratio_);
// Optimize reinitialization by reusing values which are independent of
// |sinc_scale_factor|. Provides a 3x speedup.
@@ -293,8 +299,12 @@ void SincResampler::Resample(int frames, float* destination) {
}
}
-int SincResampler::ChunkSize() const {
- return static_cast<int>(block_size_ / io_sample_rate_ratio_);
+void SincResampler::PrimeWithSilence() {
+ // By enforcing the buffer hasn't been primed, we ensure the input buffer has
+ // already been zeroed during construction or by a previous Flush() call.
+ DCHECK(!buffer_primed_);
+ DCHECK_EQ(input_buffer_[0], 0.0f);
+ UpdateRegions(true);
}
void SincResampler::Flush() {
@@ -305,6 +315,14 @@ void SincResampler::Flush() {
UpdateRegions(false);
}
+double SincResampler::BufferedFrames() const {
+ if (buffer_primed_) {
+ return request_frames_ - virtual_source_idx_;
+ } else {
+ return 0.0;
+ }
+}
+
float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor) {
diff --git a/chromium/media/base/sinc_resampler.h b/chromium/media/base/sinc_resampler.h
index 79db85390fb..e12f3814264 100644
--- a/chromium/media/base/sinc_resampler.h
+++ b/chromium/media/base/sinc_resampler.h
@@ -48,17 +48,25 @@ class MEDIA_EXPORT SincResampler {
SincResampler(double io_sample_rate_ratio,
int request_frames,
const ReadCB& read_cb);
- virtual ~SincResampler();
+ ~SincResampler();
// Resample |frames| of data from |read_cb_| into |destination|.
void Resample(int frames, float* destination);
// The maximum size in frames that guarantees Resample() will only make a
- // single call to |read_cb_| for more data.
- int ChunkSize() const;
+ // single call to |read_cb_| for more data. Note: If PrimeWithSilence() is
+ // not called, chunk size will grow after the first two Resample() calls by
+ // kKernelSize / (2 * io_sample_rate_ratio). See the .cc file for details.
+ int ChunkSize() const { return chunk_size_; }
+
+ // Guarantees that ChunkSize() will not change between calls by initializing
+ // the input buffer with silence. Note, this will cause the first few samples
+ // of output to be biased towards silence. Must be called again after Flush().
+ void PrimeWithSilence();
// Flush all buffered data and reset internal indices. Not thread safe, do
- // not call while Resample() is in progress.
+ // not call while Resample() is in progress. Note, if PrimeWithSilence() was
+ // previously called it must be called again after the Flush().
void Flush();
// Update |io_sample_rate_ratio_|. SetRatio() will cause a reconstruction of
@@ -68,6 +76,11 @@ class MEDIA_EXPORT SincResampler {
float* get_kernel_for_testing() { return kernel_storage_.get(); }
+ // Return number of input frames consumed by a callback but not yet processed.
+ // Since input/output ratio can be fractional, so can this value.
+ // Zero before first call to Resample().
+ double BufferedFrames() const;
+
private:
FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
FRIEND_TEST_ALL_PREFIXES(SincResamplerPerfTest, Convolve);
@@ -110,6 +123,10 @@ class MEDIA_EXPORT SincResampler {
// The number of source frames processed per pass.
int block_size_;
+ // Cached value used for ChunkSize(). The maximum size in frames that
+ // guarantees Resample() will only ask for input at most once.
+ int chunk_size_;
+
// The size (in samples) of the internal buffer used by the resampler.
const int input_buffer_size_;
diff --git a/chromium/media/base/sinc_resampler_perftest.cc b/chromium/media/base/sinc_resampler_perftest.cc
index b54056af80d..5ee33481b55 100644
--- a/chromium/media/base/sinc_resampler_perftest.cc
+++ b/chromium/media/base/sinc_resampler_perftest.cc
@@ -32,7 +32,7 @@ static void RunConvolveBenchmark(
float (*convolve_fn)(const float*, const float*, const float*, double),
bool aligned,
const std::string& trace_name) {
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kBenchmarkIterations; ++i) {
convolve_fn(resampler->get_kernel_for_testing() + (aligned ? 0 : 1),
resampler->get_kernel_for_testing(),
@@ -40,7 +40,7 @@ static void RunConvolveBenchmark(
kKernelInterpolationFactor);
}
double total_time_milliseconds =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ (base::TimeTicks::Now() - start).InMillisecondsF();
perf_test::PrintResult("sinc_resampler_convolve",
"",
trace_name,
diff --git a/chromium/media/base/sinc_resampler_unittest.cc b/chromium/media/base/sinc_resampler_unittest.cc
index c0f9d98f8ba..f3f45c472fb 100644
--- a/chromium/media/base/sinc_resampler_unittest.cc
+++ b/chromium/media/base/sinc_resampler_unittest.cc
@@ -66,6 +66,51 @@ TEST(SincResamplerTest, ChunkedResample) {
resampler.Resample(max_chunk_size, resampled_destination.get());
}
+// Verify priming the resampler avoids changes to ChunkSize() between calls.
+TEST(SincResamplerTest, PrimedResample) {
+ MockSource mock_source;
+
+ // Choose a high ratio of input to output samples which will result in quick
+ // exhaustion of SincResampler's internal buffers.
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+
+ // Verify the priming adjusts the chunk size within reasonable limits.
+ const int first_chunk_size = resampler.ChunkSize();
+ resampler.PrimeWithSilence();
+ const int max_chunk_size = resampler.ChunkSize();
+
+ EXPECT_NE(first_chunk_size, max_chunk_size);
+ EXPECT_LE(
+ max_chunk_size,
+ static_cast<int>(first_chunk_size + std::ceil(SincResampler::kKernelSize /
+ (2 * kSampleRateRatio))));
+
+ // Verify Flush() resets to an unprimed state.
+ resampler.Flush();
+ EXPECT_EQ(first_chunk_size, resampler.ChunkSize());
+ resampler.PrimeWithSilence();
+ EXPECT_EQ(max_chunk_size, resampler.ChunkSize());
+
+ const int kChunks = 2;
+ const int kMaxFrames = max_chunk_size * kChunks;
+ scoped_ptr<float[]> resampled_destination(new float[kMaxFrames]);
+
+ // Verify requesting ChunkSize() frames causes a single callback.
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(1).WillOnce(ClearBuffer());
+ resampler.Resample(max_chunk_size, resampled_destination.get());
+ EXPECT_EQ(max_chunk_size, resampler.ChunkSize());
+
+ // Verify requesting kChunks * ChunkSize() frames causes kChunks callbacks.
+ testing::Mock::VerifyAndClear(&mock_source);
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(kChunks).WillRepeatedly(ClearBuffer());
+ resampler.Resample(kMaxFrames, resampled_destination.get());
+ EXPECT_EQ(max_chunk_size, resampler.ChunkSize());
+}
+
// Test flush resets the internal state properly.
TEST(SincResamplerTest, Flush) {
MockSource mock_source;
@@ -90,18 +135,16 @@ TEST(SincResamplerTest, Flush) {
ASSERT_FLOAT_EQ(resampled_destination[i], 0);
}
-// Test flush resets the internal state properly.
TEST(SincResamplerTest, DISABLED_SetRatioBench) {
MockSource mock_source;
SincResampler resampler(
kSampleRateRatio, SincResampler::kDefaultRequestSize,
base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 1; i < 10000; ++i)
resampler.SetRatio(1.0 / i);
- double total_time_c_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ double total_time_c_ms = (base::TimeTicks::Now() - start).InMillisecondsF();
printf("SetRatio() took %.2fms.\n", total_time_c_ms);
}
diff --git a/chromium/media/base/stream_parser.cc b/chromium/media/base/stream_parser.cc
index 942afbf8ec9..2412b8219fc 100644
--- a/chromium/media/base/stream_parser.cc
+++ b/chromium/media/base/stream_parser.cc
@@ -12,7 +12,7 @@ namespace media {
StreamParser::InitParameters::InitParameters(base::TimeDelta duration)
: duration(duration),
auto_update_timestamp_offset(false),
- liveness(Demuxer::LIVENESS_UNKNOWN) {
+ liveness(DemuxerStream::LIVENESS_UNKNOWN) {
}
StreamParser::StreamParser() {}
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
index 398e8e0a1af..c777a16c9b0 100644
--- a/chromium/media/base/stream_parser.h
+++ b/chromium/media/base/stream_parser.h
@@ -14,6 +14,8 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/eme_constants.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
@@ -62,14 +64,12 @@ class MEDIA_EXPORT StreamParser {
bool auto_update_timestamp_offset;
// Indicates live stream.
- Demuxer::Liveness liveness;
+ DemuxerStream::Liveness liveness;
};
// Indicates completion of parser initialization.
- // success - True if initialization was successful.
- // params - Stream parameters, in case of successful initialization.
- typedef base::Callback<void(bool success,
- const InitParameters& params)> InitCB;
+ // params - Stream parameters.
+ typedef base::Callback<void(const InitParameters& params)> InitCB;
// Indicates when new stream configurations have been parsed.
// First parameter - The new audio configuration. If the config is not valid
@@ -106,8 +106,8 @@ class MEDIA_EXPORT StreamParser {
// First parameter - The type of the initialization data associated with the
// stream.
// Second parameter - The initialization data associated with the stream.
- typedef base::Callback<void(const std::string&,
- const std::vector<uint8>&)> NeedKeyCB;
+ typedef base::Callback<void(EmeInitDataType, const std::vector<uint8>&)>
+ EncryptedMediaInitDataCB;
StreamParser();
virtual ~StreamParser();
@@ -117,14 +117,15 @@ class MEDIA_EXPORT StreamParser {
// been parsed to determine the initial stream configurations, presentation
// start time, and duration. If |ignore_text_track| is true, then no text
// buffers should be passed later by the parser to |new_buffers_cb|.
- virtual void Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_track,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) = 0;
+ virtual void Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_track,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) = 0;
// Called when a seek occurs. This flushes the current parser state
// and puts the parser in a state where it can receive data for the new seek
diff --git a/chromium/media/base/stream_parser_buffer.cc b/chromium/media/base/stream_parser_buffer.cc
index 8df4fae7a40..49d0885da15 100644
--- a/chromium/media/base/stream_parser_buffer.cc
+++ b/chromium/media/base/stream_parser_buffer.cc
@@ -19,13 +19,14 @@ static scoped_refptr<StreamParserBuffer> CopyBuffer(
buffer.data_size(),
buffer.side_data(),
buffer.side_data_size(),
- buffer.IsKeyframe(),
+ buffer.is_key_frame(),
buffer.type(),
buffer.track_id());
copied_buffer->SetDecodeTimestamp(buffer.GetDecodeTimestamp());
copied_buffer->SetConfigId(buffer.GetConfigId());
copied_buffer->set_timestamp(buffer.timestamp());
copied_buffer->set_duration(buffer.duration());
+ copied_buffer->set_is_duration_estimated(buffer.is_duration_estimated());
copied_buffer->set_discard_padding(buffer.discard_padding());
copied_buffer->set_splice_timestamp(buffer.splice_timestamp());
const DecryptConfig* decrypt_config = buffer.decrypt_config();
@@ -45,20 +46,20 @@ scoped_refptr<StreamParserBuffer> StreamParserBuffer::CreateEOSBuffer() {
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
- const uint8* data, int data_size, bool is_keyframe, Type type,
+ const uint8* data, int data_size, bool is_key_frame, Type type,
TrackId track_id) {
return make_scoped_refptr(
- new StreamParserBuffer(data, data_size, NULL, 0, is_keyframe, type,
+ new StreamParserBuffer(data, data_size, NULL, 0, is_key_frame, type,
track_id));
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
const uint8* data, int data_size,
const uint8* side_data, int side_data_size,
- bool is_keyframe, Type type, TrackId track_id) {
+ bool is_key_frame, Type type, TrackId track_id) {
return make_scoped_refptr(
new StreamParserBuffer(data, data_size, side_data, side_data_size,
- is_keyframe, type, track_id));
+ is_key_frame, type, track_id));
}
DecodeTimestamp StreamParserBuffer::GetDecodeTimestamp() const {
@@ -73,22 +74,28 @@ void StreamParserBuffer::SetDecodeTimestamp(DecodeTimestamp timestamp) {
preroll_buffer_->SetDecodeTimestamp(timestamp);
}
-StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
+StreamParserBuffer::StreamParserBuffer(const uint8* data,
+ int data_size,
const uint8* side_data,
- int side_data_size, bool is_keyframe,
- Type type, TrackId track_id)
+ int side_data_size,
+ bool is_key_frame,
+ Type type,
+ TrackId track_id)
: DecoderBuffer(data, data_size, side_data, side_data_size),
- is_keyframe_(is_keyframe),
decode_timestamp_(kNoDecodeTimestamp()),
config_id_(kInvalidConfigId),
type_(type),
- track_id_(track_id) {
+ track_id_(track_id),
+ is_duration_estimated_(false) {
// TODO(scherkus): Should DataBuffer constructor accept a timestamp and
// duration to force clients to set them? Today they end up being zero which
// is both a common and valid value and could lead to bugs.
if (data) {
set_duration(kNoTimestamp());
}
+
+ if (is_key_frame)
+ set_is_key_frame(true);
}
StreamParserBuffer::~StreamParserBuffer() {}
@@ -119,6 +126,11 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
<< " dur " << duration().InSecondsF();
DCHECK(!end_of_stream());
+ // Splicing requires non-estimated sample accurate durations to be confident
+ // things will sound smooth. Also, we cannot be certain whether estimated
+ // overlap is really a splice scenario, or just over estimation.
+ DCHECK(!is_duration_estimated_);
+
// Make a copy of this first, before making any changes.
scoped_refptr<StreamParserBuffer> overlapping_buffer = CopyBuffer(*this);
overlapping_buffer->set_splice_timestamp(kNoTimestamp());
@@ -143,7 +155,7 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
SetDecodeTimestamp(first_splice_buffer->GetDecodeTimestamp());
SetConfigId(first_splice_buffer->GetConfigId());
set_timestamp(first_splice_buffer->timestamp());
- is_keyframe_ = first_splice_buffer->IsKeyframe();
+ set_is_key_frame(first_splice_buffer->is_key_frame());
type_ = first_splice_buffer->type();
track_id_ = first_splice_buffer->track_id();
set_splice_timestamp(overlapping_buffer->timestamp());
@@ -166,6 +178,7 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
DCHECK(!buffer->end_of_stream());
DCHECK(!buffer->preroll_buffer().get());
DCHECK(buffer->splice_buffers().empty());
+ DCHECK(!buffer->is_duration_estimated());
splice_buffers_.push_back(CopyBuffer(*buffer.get()));
splice_buffers_.back()->set_splice_timestamp(splice_timestamp());
}
diff --git a/chromium/media/base/stream_parser_buffer.h b/chromium/media/base/stream_parser_buffer.h
index 1651dce4855..c614828537a 100644
--- a/chromium/media/base/stream_parser_buffer.h
+++ b/chromium/media/base/stream_parser_buffer.h
@@ -110,13 +110,12 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
static scoped_refptr<StreamParserBuffer> CreateEOSBuffer();
static scoped_refptr<StreamParserBuffer> CopyFrom(
- const uint8* data, int data_size, bool is_keyframe, Type type,
+ const uint8* data, int data_size, bool is_key_frame, Type type,
TrackId track_id);
static scoped_refptr<StreamParserBuffer> CopyFrom(
const uint8* data, int data_size,
- const uint8* side_data, int side_data_size, bool is_keyframe, Type type,
+ const uint8* side_data, int side_data_size, bool is_key_frame, Type type,
TrackId track_id);
- bool IsKeyframe() const { return is_keyframe_; }
// Decode timestamp. If not explicitly set, or set to kNoTimestamp(), the
// value will be taken from the normal timestamp.
@@ -171,20 +170,26 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
void set_timestamp(base::TimeDelta timestamp) override;
+ bool is_duration_estimated() const { return is_duration_estimated_; }
+
+ void set_is_duration_estimated(bool is_estimated) {
+ is_duration_estimated_ = is_estimated;
+ }
+
private:
StreamParserBuffer(const uint8* data, int data_size,
const uint8* side_data, int side_data_size,
- bool is_keyframe, Type type,
+ bool is_key_frame, Type type,
TrackId track_id);
~StreamParserBuffer() override;
- bool is_keyframe_;
DecodeTimestamp decode_timestamp_;
int config_id_;
Type type_;
TrackId track_id_;
BufferQueue splice_buffers_;
scoped_refptr<StreamParserBuffer> preroll_buffer_;
+ bool is_duration_estimated_;
DISALLOW_COPY_AND_ASSIGN(StreamParserBuffer);
};
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
index b37dc0e8f8a..bde12b4061a 100644
--- a/chromium/media/base/test_helpers.cc
+++ b/chromium/media/base/test_helpers.cc
@@ -8,13 +8,14 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/pickle.h"
+#include "base/run_loop.h"
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "media/base/audio_buffer.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "ui/gfx/rect.h"
+#include "ui/gfx/geometry/rect.h"
using ::testing::_;
using ::testing::StrictMock;
@@ -84,26 +85,31 @@ void WaitableMessageLoopEvent::RunAndWaitForStatus(PipelineStatus expected) {
return;
}
+ run_loop_.reset(new base::RunLoop());
base::Timer timer(false, false);
timer.Start(FROM_HERE, TestTimeouts::action_timeout(), base::Bind(
&WaitableMessageLoopEvent::OnTimeout, base::Unretained(this)));
- message_loop_->Run();
+ run_loop_->Run();
EXPECT_TRUE(signaled_);
EXPECT_EQ(expected, status_);
+ run_loop_.reset();
}
void WaitableMessageLoopEvent::OnCallback(PipelineStatus status) {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
signaled_ = true;
status_ = status;
- message_loop_->QuitWhenIdle();
+
+ // |run_loop_| may be null if the callback fires before RunAndWaitForStatus().
+ if (run_loop_)
+ run_loop_->Quit();
}
void WaitableMessageLoopEvent::OnTimeout() {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
ADD_FAILURE() << "Timed out waiting for message loop to quit";
- message_loop_->QuitWhenIdle();
+ run_loop_->Quit();
}
static VideoDecoderConfig GetTestConfig(VideoCodec codec,
@@ -222,6 +228,7 @@ scoped_refptr<DecoderBuffer> CreateFakeVideoBufferForTest(
static_cast<int>(pickle.size()));
buffer->set_timestamp(timestamp);
buffer->set_duration(duration);
+ buffer->set_is_key_frame(true);
return buffer;
}
@@ -259,4 +266,10 @@ void CallbackPairChecker::RecordBCalled() {
expecting_b_ = false;
}
+void AddLogEntryForTest(MediaLog::MediaLogLevel level,
+ const std::string& message) {
+ DVLOG(1) << "Media log (" << MediaLog::MediaLogLevelToString(level)
+ << "): " << message;
+}
+
} // namespace media
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
index 810e0866ecd..712812fc4f6 100644
--- a/chromium/media/base/test_helpers.h
+++ b/chromium/media/base/test_helpers.h
@@ -8,14 +8,16 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "media/base/channel_layout.h"
+#include "media/base/media_log.h"
#include "media/base/pipeline_status.h"
#include "media/base/sample_format.h"
#include "media/base/video_decoder_config.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace base {
class MessageLoop;
+class RunLoop;
class TimeDelta;
}
@@ -52,6 +54,8 @@ class WaitableMessageLoopEvent {
// Fails the test if the timeout is reached.
void RunAndWaitForStatus(PipelineStatus expected);
+ bool is_signaled() const { return signaled_; }
+
private:
void OnCallback(PipelineStatus status);
void OnTimeout();
@@ -59,6 +63,7 @@ class WaitableMessageLoopEvent {
base::MessageLoop* message_loop_;
bool signaled_;
PipelineStatus status_;
+ scoped_ptr<base::RunLoop> run_loop_;
DISALLOW_COPY_AND_ASSIGN(WaitableMessageLoopEvent);
};
@@ -138,6 +143,11 @@ class CallbackPairChecker {
bool expecting_b_;
};
+// Test implementation of a media log LogCB that sends media log messages to
+// DVLOG(1).
+void AddLogEntryForTest(MediaLog::MediaLogLevel level,
+ const std::string& message);
+
} // namespace media
#endif // MEDIA_BASE_TEST_HELPERS_H_
diff --git a/chromium/media/base/text_renderer.cc b/chromium/media/base/text_renderer.cc
index 570907eb060..3648f8a0a8f 100644
--- a/chromium/media/base/text_renderer.cc
+++ b/chromium/media/base/text_renderer.cc
@@ -12,7 +12,6 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer.h"
-#include "media/base/demuxer_stream.h"
#include "media/base/text_cue.h"
namespace media {
diff --git a/chromium/media/base/time_delta_interpolator.cc b/chromium/media/base/time_delta_interpolator.cc
index 11ba1cd0858..acff37e72eb 100644
--- a/chromium/media/base/time_delta_interpolator.cc
+++ b/chromium/media/base/time_delta_interpolator.cc
@@ -16,7 +16,7 @@ TimeDeltaInterpolator::TimeDeltaInterpolator(base::TickClock* tick_clock)
: tick_clock_(tick_clock),
interpolating_(false),
upper_bound_(kNoTimestamp()),
- playback_rate_(1.0f) {
+ playback_rate_(1.0) {
DCHECK(tick_clock_);
}
@@ -37,7 +37,7 @@ base::TimeDelta TimeDeltaInterpolator::StopInterpolating() {
return lower_bound_;
}
-void TimeDeltaInterpolator::SetPlaybackRate(float playback_rate) {
+void TimeDeltaInterpolator::SetPlaybackRate(double playback_rate) {
lower_bound_ = GetInterpolatedTime();
reference_ = tick_clock_->NowTicks();
playback_rate_ = playback_rate;
diff --git a/chromium/media/base/time_delta_interpolator.h b/chromium/media/base/time_delta_interpolator.h
index af7535da6f2..7dbda69387b 100644
--- a/chromium/media/base/time_delta_interpolator.h
+++ b/chromium/media/base/time_delta_interpolator.h
@@ -42,7 +42,7 @@ class MEDIA_EXPORT TimeDeltaInterpolator {
// Sets a new rate at which to interpolate.
//
// |tick_clock| will be queried for a new reference time value.
- void SetPlaybackRate(float playback_rate);
+ void SetPlaybackRate(double playback_rate);
// Sets the two timestamps to interpolate between at |playback_rate_|.
// |upper_bound| must be greater or equal to |lower_bound|.
@@ -72,7 +72,7 @@ class MEDIA_EXPORT TimeDeltaInterpolator {
// |lower_bound_| and |upper_bound_|.
base::TimeTicks reference_;
- float playback_rate_;
+ double playback_rate_;
DISALLOW_COPY_AND_ASSIGN(TimeDeltaInterpolator);
};
diff --git a/chromium/media/base/time_delta_interpolator_unittest.cc b/chromium/media/base/time_delta_interpolator_unittest.cc
index 04242f122cd..04ee8f747cb 100644
--- a/chromium/media/base/time_delta_interpolator_unittest.cc
+++ b/chromium/media/base/time_delta_interpolator_unittest.cc
@@ -40,7 +40,7 @@ TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_DoubleSpeed) {
const base::TimeDelta kZero;
const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(5);
- interpolator_.SetPlaybackRate(2.0f);
+ interpolator_.SetPlaybackRate(2.0);
EXPECT_EQ(kZero, interpolator_.StartInterpolating());
AdvanceSystemTime(kTimeToAdvance);
EXPECT_EQ(2 * kTimeToAdvance, interpolator_.GetInterpolatedTime());
@@ -50,7 +50,7 @@ TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_HalfSpeed) {
const base::TimeDelta kZero;
const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(4);
- interpolator_.SetPlaybackRate(0.5f);
+ interpolator_.SetPlaybackRate(0.5);
EXPECT_EQ(kZero, interpolator_.StartInterpolating());
AdvanceSystemTime(kTimeToAdvance);
EXPECT_EQ(kTimeToAdvance / 2, interpolator_.GetInterpolatedTime());
@@ -68,9 +68,9 @@ TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_ZeroSpeed) {
EXPECT_EQ(kZero, interpolator_.StartInterpolating());
AdvanceSystemTime(kPlayDuration1);
- interpolator_.SetPlaybackRate(0.0f);
+ interpolator_.SetPlaybackRate(0.0);
AdvanceSystemTime(kPlayDuration2);
- interpolator_.SetPlaybackRate(1.0f);
+ interpolator_.SetPlaybackRate(1.0);
AdvanceSystemTime(kPlayDuration3);
EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
@@ -86,14 +86,14 @@ TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_MultiSpeed) {
const base::TimeDelta kExpected =
kPlayDuration1 / 2 + kPlayDuration2 + 2 * kPlayDuration3;
- interpolator_.SetPlaybackRate(0.5f);
+ interpolator_.SetPlaybackRate(0.5);
EXPECT_EQ(kZero, interpolator_.StartInterpolating());
AdvanceSystemTime(kPlayDuration1);
- interpolator_.SetPlaybackRate(1.0f);
+ interpolator_.SetPlaybackRate(1.0);
AdvanceSystemTime(kPlayDuration2);
- interpolator_.SetPlaybackRate(2.0f);
+ interpolator_.SetPlaybackRate(2.0);
AdvanceSystemTime(kPlayDuration3);
EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
}
diff --git a/chromium/media/base/time_source.h b/chromium/media/base/time_source.h
index 8ad364b20a2..08cf0f030bd 100644
--- a/chromium/media/base/time_source.h
+++ b/chromium/media/base/time_source.h
@@ -5,6 +5,9 @@
#ifndef MEDIA_BASE_TIME_SOURCE_H_
#define MEDIA_BASE_TIME_SOURCE_H_
+#include <vector>
+
+#include "base/callback.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
@@ -13,6 +16,11 @@ namespace media {
// A TimeSource is capable of providing the current media time.
class MEDIA_EXPORT TimeSource {
public:
+ // Helper alias for converting media timestamps into a wall clock timestamps.
+ using WallClockTimeCB =
+ base::Callback<bool(const std::vector<base::TimeDelta>&,
+ std::vector<base::TimeTicks>*)>;
+
TimeSource() {}
virtual ~TimeSource() {}
@@ -26,31 +34,45 @@ class MEDIA_EXPORT TimeSource {
// Updates the current playback rate. It is expected that values from
// CurrentMediaTime() will eventually reflect the new playback rate (e.g., the
- // media time will advance at half speed if the rate was set to 0.5f).
- virtual void SetPlaybackRate(float playback_rate) = 0;
+ // media time will advance at half speed if the rate was set to 0.5).
+ virtual void SetPlaybackRate(double playback_rate) = 0;
// Sets the media time to start ticking from. Only valid to call while the
// time source is not ticking.
virtual void SetMediaTime(base::TimeDelta time) = 0;
- // Returns the current media time.
+ // Returns the current media timestamp relative to the timestamp set by
+ // SetMediaTime().
//
// Values returned are intended for informational purposes, such as displaying
// UI with the current minute and second count. While it is guaranteed values
// will never go backwards, the frequency at which they update may be low.
virtual base::TimeDelta CurrentMediaTime() = 0;
- // Returns the current media time for use with synchronizing video.
+ // Converts a vector of media timestamps into a vector of wall clock times. If
+ // the media time is stopped, returns false and does not modify the output
+ // vector. Returns true and converts all timestamps otherwise.
+ //
+ // Within a single call to GetWallClockTimes() the returned wall clock times
+ // are a strictly increasing function of the given media times. There is no
+ // such guarantee between calls though; e.g., playback rate or audio delay may
+ // change on other threads within the pipeline.
+ //
+ // Each timestamp converted from |media_timestamps| will be pushed into
+ // |wall_clock_times| such that after all timestamps are converted, the two
+ // vectors are parallel (media_timestamps[i] -> wall_clock_times[i]).
//
- // Differences from CurrentMediaTime():
- // - Values returned update at a much higher frequency (e.g., suitable for
- // playback of 60 FPS content).
- // - As a result, values may go slightly backwards depending on the
- // implementation (e.g., uses interpolation).
+ // |media_timestamps| values too far ahead of the current media time will
+ // be converted to an estimated value; as such, these values may go backwards
+ // in time slightly between calls to GetWallClockTimes().
//
- // TODO(scherkus): Replace with a method that returns wall clock time for a
- // given media time for use with VideoFrameScheduler http://crbug.com/110814
- virtual base::TimeDelta CurrentMediaTimeForSyncingVideo() = 0;
+ // |media_timestamps| values behind the current media time may be
+ // significantly incorrect if the playback rate has changed recently. The only
+ // guarantee is that the returned time will be less than the current wall
+ // clock time.
+ virtual bool GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) = 0;
};
} // namespace media
diff --git a/chromium/media/base/user_input_monitor.cc b/chromium/media/base/user_input_monitor.cc
index e43cd626a8f..105afb3935c 100644
--- a/chromium/media/base/user_input_monitor.cc
+++ b/chromium/media/base/user_input_monitor.cc
@@ -5,7 +5,6 @@
#include "media/base/user_input_monitor.h"
#include "base/logging.h"
-#include "third_party/skia/include/core/SkPoint.h"
namespace media {
diff --git a/chromium/media/base/user_input_monitor_linux.cc b/chromium/media/base/user_input_monitor_linux.cc
index 961a9c8664f..55675ecb8c9 100644
--- a/chromium/media/base/user_input_monitor_linux.cc
+++ b/chromium/media/base/user_input_monitor_linux.cc
@@ -294,7 +294,8 @@ void UserInputMonitorLinuxCore::ProcessXEvent(xEvent* event) {
SkIPoint position(SkIPoint::Make(event->u.keyButtonPointer.rootX,
event->u.keyButtonPointer.rootY));
mouse_listeners_->Notify(
- &UserInputMonitor::MouseEventListener::OnMouseMoved, position);
+ FROM_HERE, &UserInputMonitor::MouseEventListener::OnMouseMoved,
+ position);
} else {
ui::EventType type;
if (event->u.u.type == KeyPress) {
diff --git a/chromium/media/base/user_input_monitor_win.cc b/chromium/media/base/user_input_monitor_win.cc
index 6ac3ad8c602..62102367f7e 100644
--- a/chromium/media/base/user_input_monitor_win.cc
+++ b/chromium/media/base/user_input_monitor_win.cc
@@ -39,10 +39,10 @@ class UserInputMonitorWinCore
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
const scoped_refptr<UserInputMonitor::MouseListenerList>&
mouse_listeners);
- ~UserInputMonitorWinCore();
+ ~UserInputMonitorWinCore() override;
// DestructionObserver overrides.
- virtual void WillDestroyCurrentMessageLoop() override;
+ void WillDestroyCurrentMessageLoop() override;
size_t GetKeyPressCount() const;
void StartMonitor(EventBitMask type);
@@ -75,17 +75,17 @@ class UserInputMonitorWin : public UserInputMonitor {
public:
explicit UserInputMonitorWin(
const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner);
- virtual ~UserInputMonitorWin();
+ ~UserInputMonitorWin() override;
// Public UserInputMonitor overrides.
- virtual size_t GetKeyPressCount() const override;
+ size_t GetKeyPressCount() const override;
private:
// Private UserInputMonitor overrides.
- virtual void StartKeyboardMonitoring() override;
- virtual void StopKeyboardMonitoring() override;
- virtual void StartMouseMonitoring() override;
- virtual void StopMouseMonitoring() override;
+ void StartKeyboardMonitoring() override;
+ void StopKeyboardMonitoring() override;
+ void StartMouseMonitoring() override;
+ void StopMouseMonitoring() override;
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
UserInputMonitorWinCore* core_;
@@ -205,7 +205,7 @@ LRESULT UserInputMonitorWinCore::OnInput(HRAWINPUT input_handle) {
position.y = 0;
}
mouse_listeners_->Notify(
- &UserInputMonitor::MouseEventListener::OnMouseMoved,
+ FROM_HERE, &UserInputMonitor::MouseEventListener::OnMouseMoved,
SkIPoint::Make(position.x, position.y));
} else if (input->header.dwType == RIM_TYPEKEYBOARD &&
input->header.hDevice != NULL) {
diff --git a/chromium/media/base/vector_math_perftest.cc b/chromium/media/base/vector_math_perftest.cc
index 2cf4691be4a..ae3b54e6bf5 100644
--- a/chromium/media/base/vector_math_perftest.cc
+++ b/chromium/media/base/vector_math_perftest.cc
@@ -36,7 +36,7 @@ class VectorMathPerfTest : public testing::Test {
bool aligned,
const std::string& test_name,
const std::string& trace_name) {
- TimeTicks start = TimeTicks::HighResNow();
+ TimeTicks start = TimeTicks::Now();
for (int i = 0; i < kBenchmarkIterations; ++i) {
fn(input_vector_.get(),
kScale,
@@ -44,7 +44,7 @@ class VectorMathPerfTest : public testing::Test {
output_vector_.get());
}
double total_time_milliseconds =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
+ (TimeTicks::Now() - start).InMillisecondsF();
perf_test::PrintResult(test_name,
"",
trace_name,
@@ -58,12 +58,12 @@ class VectorMathPerfTest : public testing::Test {
int len,
const std::string& test_name,
const std::string& trace_name) {
- TimeTicks start = TimeTicks::HighResNow();
+ TimeTicks start = TimeTicks::Now();
for (int i = 0; i < kEWMABenchmarkIterations; ++i) {
fn(0.5f, input_vector_.get(), len, 0.1f);
}
double total_time_milliseconds =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
+ (TimeTicks::Now() - start).InMillisecondsF();
perf_test::PrintResult(test_name,
"",
trace_name,
diff --git a/chromium/media/video/capture/video_capture_types.cc b/chromium/media/base/video_capture_types.cc
index f7638b1580f..f8463cfa5d6 100644
--- a/chromium/media/video/capture/video_capture_types.cc
+++ b/chromium/media/base/video_capture_types.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/video_capture_types.h"
+#include "media/base/video_capture_types.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
@@ -31,8 +31,42 @@ bool VideoCaptureFormat::IsValid() const {
(pixel_format < PIXEL_FORMAT_MAX);
}
+size_t VideoCaptureFormat::ImageAllocationSize() const {
+ size_t result_frame_size = frame_size.GetArea();
+ switch (pixel_format) {
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ result_frame_size = result_frame_size * 3 / 2;
+ break;
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ result_frame_size *= 2;
+ break;
+ case PIXEL_FORMAT_RGB24:
+ result_frame_size *= 3;
+ break;
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_ARGB:
+ // GpuMemoryBuffer is an endianness-agnostic 32bpp pixel format until
+ // http://crbug.com/439520 is closed.
+ case PIXEL_FORMAT_GPUMEMORYBUFFER:
+ result_frame_size *= 4;
+ break;
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_TEXTURE:
+ result_frame_size = 0;
+ break;
+ default: // Sizes for the rest of the formats are unknown.
+ NOTREACHED() << "Unknown pixel format provided.";
+ break;
+ }
+ return result_frame_size;
+}
+
std::string VideoCaptureFormat::ToString() const {
- return base::StringPrintf("resolution: %s, fps: %f, pixel format: %s",
+ return base::StringPrintf("resolution: %s, fps: %.3f, pixel format: %s",
frame_size.ToString().c_str(),
frame_rate,
PixelFormatToString(pixel_format).c_str());
@@ -47,19 +81,25 @@ std::string VideoCaptureFormat::PixelFormatToString(VideoPixelFormat format) {
case PIXEL_FORMAT_YUY2:
return "YUY2";
case PIXEL_FORMAT_UYVY:
- return "UYUY";
+ return "UYVY";
case PIXEL_FORMAT_RGB24:
return "RGB24";
+ case PIXEL_FORMAT_RGB32:
+ return "RGB32";
case PIXEL_FORMAT_ARGB:
return "ARGB";
case PIXEL_FORMAT_MJPEG:
return "MJPEG";
+ case PIXEL_FORMAT_NV12:
+ return "NV12";
case PIXEL_FORMAT_NV21:
- return "YV12";
+ return "NV21";
case PIXEL_FORMAT_YV12:
return "YV12";
case PIXEL_FORMAT_TEXTURE:
return "TEXTURE";
+ case PIXEL_FORMAT_GPUMEMORYBUFFER:
+ return "GPUMEMORYBUFFER";
case PIXEL_FORMAT_MAX:
break;
}
@@ -68,14 +108,6 @@ std::string VideoCaptureFormat::PixelFormatToString(VideoPixelFormat format) {
}
VideoCaptureParams::VideoCaptureParams()
- : resolution_change_policy(RESOLUTION_POLICY_FIXED) {}
-
-ImageCaptureFormat::ImageCaptureFormat() : pixel_format(PIXEL_FORMAT_UNKNOWN) {
-}
-
-ImageCaptureFormat::ImageCaptureFormat(const gfx::Size& frame_size,
- VideoPixelFormat pixel_format)
- : frame_size(frame_size), pixel_format(pixel_format) {
-}
+ : resolution_change_policy(RESOLUTION_POLICY_FIXED_RESOLUTION) {}
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_types.h b/chromium/media/base/video_capture_types.h
index 91118b91ff2..c849ed7b35f 100644
--- a/chromium/media/video/capture/video_capture_types.h
+++ b/chromium/media/base/video_capture_types.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
-#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
+#ifndef MEDIA_BASE_VIDEO_CAPTURE_TYPES_H_
+#define MEDIA_BASE_VIDEO_CAPTURE_TYPES_H_
#include <vector>
#include "media/base/media_export.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -20,30 +20,38 @@ typedef int VideoCaptureSessionId;
enum VideoPixelFormat {
PIXEL_FORMAT_I420,
PIXEL_FORMAT_YV12,
+ PIXEL_FORMAT_NV12,
PIXEL_FORMAT_NV21,
PIXEL_FORMAT_UYVY,
PIXEL_FORMAT_YUY2,
PIXEL_FORMAT_RGB24,
+ PIXEL_FORMAT_RGB32,
PIXEL_FORMAT_ARGB,
PIXEL_FORMAT_MJPEG,
PIXEL_FORMAT_TEXTURE, // Capture format as a GL texture.
+ PIXEL_FORMAT_GPUMEMORYBUFFER,
PIXEL_FORMAT_UNKNOWN, // Color format not set.
PIXEL_FORMAT_MAX,
};
-// Policies for capture devices that has source content with dynamic resolution.
+// Policies for capture devices that have source content that varies in size.
+// It is up to the implementation how the captured content will be transformed
+// (e.g., scaling and/or letterboxing) in order to produce video frames that
+// strictly adheree to one of these policies.
enum ResolutionChangePolicy {
// Capture device outputs a fixed resolution all the time. The resolution of
// the first frame is the resolution for all frames.
- // It is implementation specific for the capture device to scale, letter-box
- // and pillar-box. The only guarantee is that resolution will never change.
- RESOLUTION_POLICY_FIXED,
+ RESOLUTION_POLICY_FIXED_RESOLUTION,
- // Capture device outputs frames with dynamic resolution. The width and height
- // will not exceed the maximum dimensions specified. The typical scenario is
- // the frames will have the same aspect ratio as the original content and
- // scaled down to fit inside the limit.
- RESOLUTION_POLICY_DYNAMIC_WITHIN_LIMIT,
+ // Capture device is allowed to output frames of varying resolutions. The
+ // width and height will not exceed the maximum dimensions specified. The
+ // aspect ratio of the frames will match the aspect ratio of the maximum
+ // dimensions as closely as possible.
+ RESOLUTION_POLICY_FIXED_ASPECT_RATIO,
+
+ // Capture device is allowed to output frames of varying resolutions not
+ // exceeding the maximum dimensions specified.
+ RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
RESOLUTION_POLICY_LAST,
};
@@ -66,34 +74,27 @@ class MEDIA_EXPORT VideoCaptureFormat {
std::string ToString() const;
static std::string PixelFormatToString(VideoPixelFormat format);
+ // Returns the required buffer size to hold an image of a given
+ // VideoCaptureFormat with no padding and tightly packed.
+ size_t ImageAllocationSize() const;
+
// Checks that all values are in the expected range. All limits are specified
// in media::Limits.
bool IsValid() const;
- gfx::Size frame_size;
- float frame_rate;
- VideoPixelFormat pixel_format;
-};
-
-// Image capture format specification.
-// This class is used by the video capture device to specify the format of a
-// still image captured and returned to a client. A list of these is also
-// provided when client queries supported formats for still image capture.
-class MEDIA_EXPORT ImageCaptureFormat {
- public:
- ImageCaptureFormat();
-
- ImageCaptureFormat(const gfx::Size& frame_size,
- VideoPixelFormat pixel_format);
+ bool operator==(const VideoCaptureFormat& other) const {
+ return frame_size == other.frame_size &&
+ frame_rate == other.frame_rate &&
+ pixel_format == other.pixel_format;
+ }
gfx::Size frame_size;
+ float frame_rate;
VideoPixelFormat pixel_format;
};
typedef std::vector<VideoCaptureFormat> VideoCaptureFormats;
-typedef std::vector<ImageCaptureFormat> ImageCaptureFormats;
-
// Parameters for starting video capture.
// This class is used by the client of a video capture device to specify the
// format of frames in which the client would like to have captured frames
@@ -102,6 +103,11 @@ class MEDIA_EXPORT VideoCaptureParams {
public:
VideoCaptureParams();
+ bool operator==(const VideoCaptureParams& other) const {
+ return requested_format == other.requested_format &&
+ resolution_change_policy == other.resolution_change_policy;
+ }
+
// Requests a resolution and format at which the capture will occur.
VideoCaptureFormat requested_format;
@@ -111,4 +117,4 @@ class MEDIA_EXPORT VideoCaptureParams {
} // namespace media
-#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
+#endif // MEDIA_BASE_VIDEO_CAPTURE_TYPES_H_
diff --git a/chromium/media/base/video_capturer_source.cc b/chromium/media/base/video_capturer_source.cc
new file mode 100644
index 00000000000..c1ebbe1dbff
--- /dev/null
+++ b/chromium/media/base/video_capturer_source.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_capturer_source.h"
+
+namespace media {
+
+VideoCapturerSource::~VideoCapturerSource() {}
+
+} // namespace media
diff --git a/chromium/media/base/video_capturer_source.h b/chromium/media/base/video_capturer_source.h
new file mode 100644
index 00000000000..774f28da02f
--- /dev/null
+++ b/chromium/media/base/video_capturer_source.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/media_export.h"
+#include "media/base/video_capture_types.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+// VideoCapturerSource is an interface representing the source for
+// captured video. An implementation will periodically call the frame
+// callback with new video frames.
+class MEDIA_EXPORT VideoCapturerSource {
+ public:
+ virtual ~VideoCapturerSource();
+
+ // This callback is used to deliver video frames.
+ //
+ // |estimated_capture_time| - The capture time of the delivered video
+ // frame. This field represents the local time at which either: 1) the frame
+ // was generated, if it was done so locally; or 2) the targeted play-out time
+ // of the frame, if it was generated from a remote source. Either way, an
+ // implementation should not present the frame before this point-in-time. This
+ // value is NOT a high-resolution timestamp, and so it should not be used as a
+ // presentation time; but, instead, it should be used for buffering playback
+ // and for A/V synchronization purposes. NOTE: It is possible for this value
+ // to be null if the current implementation lacks this timing information.
+ //
+ // |video_frame->timestamp()| gives the presentation timestamp of the video
+ // frame relative to the first frame generated by the corresponding source.
+ // Because a source can start generating frames before a subscriber is added,
+ // the first video frame delivered may not have timestamp equal to 0.
+ typedef base::Callback<
+ void(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& estimated_capture_time)>
+ VideoCaptureDeliverFrameCB;
+
+ typedef base::Callback<void(const media::VideoCaptureFormats&)>
+ VideoCaptureDeviceFormatsCB;
+
+ typedef base::Callback<void(bool)> RunningCallback;
+
+ // Collects the formats that can currently be used.
+ // |max_requested_height|, |max_requested_width|, and
+ // |max_requested_frame_rate| is used by Tab and Screen capture to decide what
+ // resolution/framerate to generate. |callback| is triggered when the formats
+ // have been collected.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ double max_requested_frame_rate,
+ const VideoCaptureDeviceFormatsCB& callback) = 0;
+
+ // Starts capturing frames using the resolution in |params|.
+ // |new_frame_callback| is triggered on |frame_callback_task_runner|
+ // when a new video frame is available.
+ // If capturing is started successfully then |running_callback| will be
+ // called with a parameter of true. Note that some implementations may
+ // simply reject StartCapture (by calling running_callback with a false
+ // argument) if called with the wrong task runner.
+ // If capturing fails to start or stopped due to an external event then
+ // |running_callback| will be called with a parameter of false.
+ // |running_callback| will always be called on the same thread as the
+ // StartCapture.
+ virtual void StartCapture(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& new_frame_callback,
+ scoped_refptr<base::SingleThreadTaskRunner> frame_callback_task_runner,
+ const RunningCallback& running_callback) = 0;
+
+ // Stops capturing frames and clears all callbacks including the
+ // SupportedFormatsCallback callback. Note that queued frame callbacks
+ // may still occur after this call, so the caller must take care to
+ // use refcounted or weak references in |new_frame_callback|.
+ virtual void StopCapture() = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
diff --git a/chromium/media/base/video_decoder.h b/chromium/media/base/video_decoder.h
index 52aa86762f1..c49e0dcdbd2 100644
--- a/chromium/media/base/video_decoder.h
+++ b/chromium/media/base/video_decoder.h
@@ -11,7 +11,7 @@
#include "base/memory/ref_counted.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -55,6 +55,12 @@ class MEDIA_EXPORT VideoDecoder {
// |status_cb| upon completion. |output_cb| is called for each output frame
// decoded by Decode().
//
+ // If |low_delay| is true then the decoder is not allowed to queue frames,
+ // except for out-of-order frames, i.e. if the next frame can be returned it
+ // must be returned without waiting for Decode() to be called again.
+ // Initialization should fail if |low_delay| is true and the decoder cannot
+ // satisfy the requirements above.
+ //
// Note:
// 1) The VideoDecoder will be reinitialized if it was initialized before.
// Upon reinitialization, all internal buffered frames will be dropped.
@@ -78,8 +84,9 @@ class MEDIA_EXPORT VideoDecoder {
// called again).
//
// After decoding is finished the decoder calls |output_cb| specified in
- // Initialize() for each decoded frame. |output_cb| may be called before or
- // after |decode_cb|.
+ // Initialize() for each decoded frame. In general |output_cb| may be called
+ // before or after |decode_cb|, but software decoders normally call
+ // |output_cb| before calling |decode_cb|, i.e. while Decode() is pending.
//
// If |buffer| is an EOS buffer then the decoder must be flushed, i.e.
// |output_cb| must be called for each frame pending in the queue and
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
index d2b6e410103..27fe9f59b9f 100644
--- a/chromium/media/base/video_decoder_config.cc
+++ b/chromium/media/base/video_decoder_config.cc
@@ -113,7 +113,7 @@ bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const {
std::string VideoDecoderConfig::AsHumanReadableString() const {
std::ostringstream s;
- s << "codec: " << codec()
+ s << "codec: " << GetHumanReadableCodecName()
<< " format: " << format()
<< " profile: " << profile()
<< " coded size: [" << coded_size().width()
@@ -129,6 +129,30 @@ std::string VideoDecoderConfig::AsHumanReadableString() const {
return s.str();
}
+// The names come from src/third_party/ffmpeg/libavcodec/codec_desc.c
+std::string VideoDecoderConfig::GetHumanReadableCodecName() const {
+ switch (codec()) {
+ case kUnknownVideoCodec:
+ return "unknown";
+ case kCodecH264:
+ return "h264";
+ case kCodecVC1:
+ return "vc1";
+ case kCodecMPEG2:
+ return "mpeg2video";
+ case kCodecMPEG4:
+ return "mpeg4";
+ case kCodecTheora:
+ return "theora";
+ case kCodecVP8:
+ return "vp8";
+ case kCodecVP9:
+ return "vp9";
+ }
+ NOTREACHED();
+ return "";
+}
+
VideoCodec VideoDecoderConfig::codec() const {
return codec_;
}
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
index 5d01d086033..356b467f640 100644
--- a/chromium/media/base/video_decoder_config.h
+++ b/chromium/media/base/video_decoder_config.h
@@ -11,8 +11,8 @@
#include "base/basictypes.h"
#include "media/base/media_export.h"
#include "media/base/video_frame.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -109,6 +109,8 @@ class MEDIA_EXPORT VideoDecoderConfig {
// output only.
std::string AsHumanReadableString() const;
+ std::string GetHumanReadableCodecName() const;
+
VideoCodec codec() const;
VideoCodecProfile profile() const;
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index abea8834466..cae98705170 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -11,14 +11,9 @@
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/strings/string_piece.h"
-#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/limits.h"
#include "media/base/video_util.h"
-#include "ui/gfx/point.h"
-
-#if !defined(MEDIA_FOR_CAST_IOS)
-#include "third_party/skia/include/core/SkBitmap.h"
-#endif
+#include "ui/gfx/geometry/point.h"
namespace media {
@@ -57,6 +52,7 @@ static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
case VideoFrame::YV12:
case VideoFrame::YV12J:
+ case VideoFrame::YV12HD:
case VideoFrame::I420:
case VideoFrame::YV12A:
case VideoFrame::NV12:
@@ -67,6 +63,7 @@ static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
case VideoFrame::NATIVE_TEXTURE:
+ case VideoFrame::ARGB:
break;
}
}
@@ -91,7 +88,13 @@ static gfx::Size CommonAlignment(VideoFrame::Format format) {
// 2 for the UV plane in NV12.
static int BytesPerElement(VideoFrame::Format format, size_t plane) {
DCHECK(VideoFrame::IsValidPlane(plane, format));
- return (format == VideoFrame::NV12 && plane == VideoFrame::kUVPlane) ? 2 : 1;
+ if (format == VideoFrame::ARGB)
+ return 4;
+
+ if (format == VideoFrame::NV12 && plane == VideoFrame::kUVPlane)
+ return 2;
+
+ return 1;
}
// Rounds up |coded_size| if necessary for |format|.
@@ -109,12 +112,26 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- DCHECK(format != VideoFrame::UNKNOWN &&
- format != VideoFrame::NV12 &&
- format != VideoFrame::NATIVE_TEXTURE);
+ switch (format) {
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12A:
+ case VideoFrame::YV12J:
+ case VideoFrame::YV24:
+ case VideoFrame::YV12HD:
+ break;
+
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NV12:
+ case VideoFrame::NATIVE_TEXTURE:
#if defined(VIDEO_HOLE)
- DCHECK(format != VideoFrame::HOLE);
+ case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
// Since we're creating a new YUV frame (and allocating memory for it
// ourselves), we can pad the requested |coded_size| if necessary if the
@@ -122,14 +139,10 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
DCHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
+ gpu::MailboxHolder mailboxes[kMaxPlanes];
scoped_refptr<VideoFrame> frame(
- new VideoFrame(format,
- new_coded_size,
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- timestamp,
- false));
+ new VideoFrame(format, new_coded_size, visible_rect, natural_size,
+ mailboxes, TEXTURE_RGBA, timestamp, false));
frame->AllocateYUV();
return frame;
}
@@ -159,6 +172,10 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
return "NV12";
case VideoFrame::YV24:
return "YV24";
+ case VideoFrame::ARGB:
+ return "ARGB";
+ case VideoFrame::YV12HD:
+ return "YV12HD";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -201,7 +218,9 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
case VideoFrame::I420:
case VideoFrame::YV12A:
case VideoFrame::NV12:
+ case VideoFrame::YV12HD:
case VideoFrame::YV16:
+ case VideoFrame::ARGB:
// Check that software-allocated buffer formats are aligned correctly and
// not empty.
const gfx::Size alignment = CommonAlignment(format);
@@ -219,33 +238,47 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
// static
scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
- scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const gpu::MailboxHolder& mailbox_holder,
const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb) {
- scoped_refptr<VideoFrame> frame(new VideoFrame(NATIVE_TEXTURE,
- coded_size,
- visible_rect,
- natural_size,
- mailbox_holder.Pass(),
- timestamp,
- false));
- frame->mailbox_holder_release_cb_ = mailbox_holder_release_cb;
- frame->read_pixels_cb_ = read_pixels_cb;
-
+ bool allow_overlay,
+ bool has_alpha) {
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
+ mailbox_holders[kARGBPlane] = mailbox_holder;
+ TextureFormat texture_format = has_alpha ? TEXTURE_RGBA : TEXTURE_RGB;
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(NATIVE_TEXTURE, coded_size, visible_rect, natural_size,
+ mailbox_holders, texture_format, timestamp, false));
+ frame->mailbox_holders_release_cb_ = mailbox_holder_release_cb;
+ frame->allow_overlay_ = allow_overlay;
return frame;
}
-#if !defined(MEDIA_FOR_CAST_IOS)
-void VideoFrame::ReadPixelsFromNativeTexture(const SkBitmap& pixels) {
- DCHECK_EQ(format_, NATIVE_TEXTURE);
- if (!read_pixels_cb_.is_null())
- read_pixels_cb_.Run(pixels);
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapYUV420NativeTextures(
+ const gpu::MailboxHolder& y_mailbox_holder,
+ const gpu::MailboxHolder& u_mailbox_holder,
+ const gpu::MailboxHolder& v_mailbox_holder,
+ const ReleaseMailboxCB& mailbox_holder_release_cb,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ bool allow_overlay) {
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
+ mailbox_holders[kYPlane] = y_mailbox_holder;
+ mailbox_holders[kUPlane] = u_mailbox_holder;
+ mailbox_holders[kVPlane] = v_mailbox_holder;
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(NATIVE_TEXTURE, coded_size, visible_rect, natural_size,
+ mailbox_holders, TEXTURE_YUV_420, timestamp, false));
+ frame->mailbox_holders_release_cb_ = mailbox_holder_release_cb;
+ frame->allow_overlay_ = allow_overlay;
+ return frame;
}
-#endif
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
@@ -256,6 +289,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
uint8* data,
size_t data_size,
base::SharedMemoryHandle handle,
+ size_t data_offset,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
@@ -267,15 +301,12 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
switch (format) {
case VideoFrame::I420: {
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
scoped_refptr<VideoFrame> frame(
- new VideoFrame(format,
- new_coded_size,
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- timestamp,
- false));
+ new VideoFrame(format, new_coded_size, visible_rect, natural_size,
+ mailbox_holders, TEXTURE_RGBA, timestamp, false));
frame->shared_memory_handle_ = handle;
+ frame->shared_memory_offset_ = data_offset;
frame->strides_[kYPlane] = new_coded_size.width();
frame->strides_[kUPlane] = new_coded_size.width() / 2;
frame->strides_[kVPlane] = new_coded_size.width() / 2;
@@ -291,6 +322,37 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
}
}
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ int32 y_stride,
+ int32 u_stride,
+ int32 v_stride,
+ uint8* y_data,
+ uint8* u_data,
+ uint8* v_data,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb) {
+ const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
+ CHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
+
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(format, new_coded_size, visible_rect, natural_size,
+ mailbox_holders, TEXTURE_RGBA, timestamp, false));
+ frame->strides_[kYPlane] = y_stride;
+ frame->strides_[kUPlane] = u_stride;
+ frame->strides_[kVPlane] = v_stride;
+ frame->data_[kYPlane] = y_data;
+ frame->data_[kUPlane] = u_data;
+ frame->data_[kVPlane] = v_data;
+ frame->no_longer_needed_cb_ = no_longer_needed_cb;
+ return frame;
+}
+
#if defined(OS_POSIX)
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
@@ -304,19 +366,17 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
return NULL;
+ // TODO(posciak): This is not exactly correct, it's possible for one
+ // buffer to contain more than one plane.
if (dmabuf_fds.size() != NumPlanes(format)) {
LOG(FATAL) << "Not enough dmabuf fds provided!";
return NULL;
}
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
scoped_refptr<VideoFrame> frame(
- new VideoFrame(format,
- coded_size,
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- timestamp,
- false));
+ new VideoFrame(format, coded_size, visible_rect, natural_size,
+ mailbox_holders, TEXTURE_RGBA, timestamp, false));
for (size_t i = 0; i < dmabuf_fds.size(); ++i) {
int duped_fd = HANDLE_EINTR(dup(dmabuf_fds[i]));
@@ -369,14 +429,10 @@ scoped_refptr<VideoFrame> VideoFrame::WrapCVPixelBuffer(
if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
return NULL;
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
scoped_refptr<VideoFrame> frame(
- new VideoFrame(format,
- coded_size,
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- timestamp,
- false));
+ new VideoFrame(format, coded_size, visible_rect, natural_size,
+ mailbox_holders, TEXTURE_RGBA, timestamp, false));
frame->cv_pixel_buffer_.reset(cv_pixel_buffer, base::scoped_policy::RETAIN);
return frame;
@@ -384,41 +440,6 @@ scoped_refptr<VideoFrame> VideoFrame::WrapCVPixelBuffer(
#endif
// static
-scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
- Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- int32 y_stride,
- int32 u_stride,
- int32 v_stride,
- uint8* y_data,
- uint8* u_data,
- uint8* v_data,
- base::TimeDelta timestamp,
- const base::Closure& no_longer_needed_cb) {
- const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
- CHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
-
- scoped_refptr<VideoFrame> frame(
- new VideoFrame(format,
- new_coded_size,
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- timestamp,
- false));
- frame->strides_[kYPlane] = y_stride;
- frame->strides_[kUPlane] = u_stride;
- frame->strides_[kVPlane] = v_stride;
- frame->data_[kYPlane] = y_data;
- frame->data_[kUPlane] = u_data;
- frame->data_[kVPlane] = v_data;
- frame->no_longer_needed_cb_ = no_longer_needed_cb;
- return frame;
-}
-
-// static
scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
const gfx::Rect& visible_rect,
@@ -429,14 +450,11 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
CHECK_NE(frame->format(), NATIVE_TEXTURE);
DCHECK(frame->visible_rect().Contains(visible_rect));
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
scoped_refptr<VideoFrame> wrapped_frame(
- new VideoFrame(frame->format(),
- frame->coded_size(),
- visible_rect,
- natural_size,
- scoped_ptr<gpu::MailboxHolder>(),
- frame->timestamp(),
- frame->end_of_stream()));
+ new VideoFrame(frame->format(), frame->coded_size(), visible_rect,
+ natural_size, mailbox_holders, TEXTURE_RGBA,
+ frame->timestamp(), frame->end_of_stream()));
for (size_t i = 0; i < NumPlanes(frame->format()); ++i) {
wrapped_frame->strides_[i] = frame->stride(i);
@@ -449,13 +467,10 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
// static
scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
- return new VideoFrame(VideoFrame::UNKNOWN,
- gfx::Size(),
- gfx::Rect(),
- gfx::Size(),
- scoped_ptr<gpu::MailboxHolder>(),
- kNoTimestamp(),
- true);
+ gpu::MailboxHolder mailbox_holders[kMaxPlanes];
+ return new VideoFrame(VideoFrame::UNKNOWN, gfx::Size(), gfx::Rect(),
+ gfx::Size(), mailbox_holders, TEXTURE_RGBA,
+ kNoTimestamp(), true);
}
// static
@@ -502,14 +517,10 @@ scoped_refptr<VideoFrame> VideoFrame::CreateTransparentFrame(
scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
const gfx::Size& size) {
DCHECK(IsValidConfig(VideoFrame::HOLE, size, gfx::Rect(size), size));
+ gpu::MailboxHolder mailboxes[kMaxPlanes];
scoped_refptr<VideoFrame> frame(
- new VideoFrame(VideoFrame::HOLE,
- size,
- gfx::Rect(size),
- size,
- scoped_ptr<gpu::MailboxHolder>(),
- base::TimeDelta(),
- false));
+ new VideoFrame(VideoFrame::HOLE, size, gfx::Rect(size), size, mailboxes,
+ TEXTURE_RGBA, base::TimeDelta(), false));
return frame;
}
#endif // defined(VIDEO_HOLE)
@@ -522,12 +533,15 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
return 0;
+ case VideoFrame::ARGB:
+ return 1;
case VideoFrame::NV12:
return 2;
case VideoFrame::YV12:
case VideoFrame::YV16:
case VideoFrame::I420:
case VideoFrame::YV12J:
+ case VideoFrame::YV12HD:
case VideoFrame::YV24:
return 3;
case VideoFrame::YV12A:
@@ -539,6 +553,19 @@ size_t VideoFrame::NumPlanes(Format format) {
return 0;
}
+// static
+size_t VideoFrame::NumTextures(TextureFormat texture_format) {
+ switch (texture_format) {
+ case TEXTURE_RGBA:
+ case TEXTURE_RGB:
+ return 1;
+ case TEXTURE_YUV_420:
+ return 3;
+ }
+
+ NOTREACHED();
+ return 0;
+}
// static
size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
@@ -554,11 +581,15 @@ gfx::Size VideoFrame::PlaneSize(Format format,
const gfx::Size& coded_size) {
DCHECK(IsValidPlane(plane, format));
- // Align to multiple-of-two size overall. This ensures that non-subsampled
- // planes can be addressed by pixel with the same scaling as the subsampled
- // planes.
- const int width = RoundUp(coded_size.width(), 2);
- const int height = RoundUp(coded_size.height(), 2);
+ int width = coded_size.width();
+ int height = coded_size.height();
+ if (format != VideoFrame::ARGB) {
+ // Align to multiple-of-two size overall. This ensures that non-subsampled
+ // planes can be addressed by pixel with the same scaling as the subsampled
+ // planes.
+ width = RoundUp(width, 2);
+ height = RoundUp(height, 2);
+ }
const gfx::Size subsample = SampleSize(format, plane);
DCHECK(width % subsample.width() == 0);
@@ -570,7 +601,6 @@ gfx::Size VideoFrame::PlaneSize(Format format,
size_t VideoFrame::PlaneAllocationSize(Format format,
size_t plane,
const gfx::Size& coded_size) {
- // VideoFrame formats are (so far) all YUV and 1 byte per sample.
return PlaneSize(format, plane, coded_size).GetArea();
}
@@ -578,9 +608,16 @@ size_t VideoFrame::PlaneAllocationSize(Format format,
int VideoFrame::PlaneHorizontalBitsPerPixel(Format format, size_t plane) {
DCHECK(IsValidPlane(plane, format));
const int bits_per_element = 8 * BytesPerElement(format, plane);
- const int pixels_per_element = SampleSize(format, plane).GetArea();
- DCHECK(bits_per_element % pixels_per_element == 0);
- return bits_per_element / pixels_per_element;
+ const int horiz_pixels_per_element = SampleSize(format, plane).width();
+ DCHECK_EQ(bits_per_element % horiz_pixels_per_element, 0);
+ return bits_per_element / horiz_pixels_per_element;
+}
+
+// static
+int VideoFrame::PlaneBitsPerPixel(Format format, size_t plane) {
+ DCHECK(IsValidPlane(plane, format));
+ return PlaneHorizontalBitsPerPixel(format, plane) /
+ SampleSize(format, plane).height();
}
// Release data allocated by AllocateYUV().
@@ -591,8 +628,9 @@ static void ReleaseData(uint8* data) {
void VideoFrame::AllocateYUV() {
DCHECK(format_ == YV12 || format_ == YV16 || format_ == YV12A ||
- format_ == I420 || format_ == YV12J || format_ == YV24);
- COMPILE_ASSERT(0 == kYPlane, y_plane_data_must_be_index_0);
+ format_ == I420 || format_ == YV12J || format_ == YV24 ||
+ format_ == YV12HD);
+ static_assert(0 == kYPlane, "y plane data must be index 0");
size_t data_size = 0;
size_t offset[kMaxPlanes];
@@ -630,26 +668,29 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const gpu::MailboxHolder(&mailbox_holders)[kMaxPlanes],
+ VideoFrame::TextureFormat texture_format,
base::TimeDelta timestamp,
bool end_of_stream)
: format_(format),
+ texture_format_(texture_format),
coded_size_(coded_size),
visible_rect_(visible_rect),
natural_size_(natural_size),
- mailbox_holder_(mailbox_holder.Pass()),
shared_memory_handle_(base::SharedMemory::NULLHandle()),
+ shared_memory_offset_(0),
timestamp_(timestamp),
release_sync_point_(0),
- end_of_stream_(end_of_stream) {
+ end_of_stream_(end_of_stream),
+ allow_overlay_(false) {
DCHECK(IsValidConfig(format_, coded_size_, visible_rect_, natural_size_));
-
+ memcpy(&mailbox_holders_, mailbox_holders, sizeof(mailbox_holders_));
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
}
VideoFrame::~VideoFrame() {
- if (!mailbox_holder_release_cb_.is_null()) {
+ if (!mailbox_holders_release_cb_.is_null()) {
uint32 release_sync_point;
{
// To ensure that changes to |release_sync_point_| are visible on this
@@ -657,7 +698,7 @@ VideoFrame::~VideoFrame() {
base::AutoLock locker(release_sync_point_lock_);
release_sync_point = release_sync_point_;
}
- base::ResetAndReturn(&mailbox_holder_release_cb_).Run(release_sync_point);
+ base::ResetAndReturn(&mailbox_holders_release_cb_).Run(release_sync_point);
}
if (!no_longer_needed_cb_.is_null())
base::ResetAndReturn(&no_longer_needed_cb_).Run();
@@ -735,20 +776,25 @@ uint8* VideoFrame::visible_data(size_t plane) {
static_cast<const VideoFrame*>(this)->visible_data(plane));
}
-const gpu::MailboxHolder* VideoFrame::mailbox_holder() const {
+const gpu::MailboxHolder& VideoFrame::mailbox_holder(size_t texture) const {
DCHECK_EQ(format_, NATIVE_TEXTURE);
- return mailbox_holder_.get();
+ DCHECK_LT(texture, NumTextures(texture_format_));
+ return mailbox_holders_[texture];
}
base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
return shared_memory_handle_;
}
+size_t VideoFrame::shared_memory_offset() const {
+ return shared_memory_offset_;
+}
+
void VideoFrame::UpdateReleaseSyncPoint(SyncPointClient* client) {
DCHECK_EQ(format_, NATIVE_TEXTURE);
base::AutoLock locker(release_sync_point_lock_);
// Must wait on the previous sync point before inserting a new sync point so
- // that |mailbox_holder_release_cb_| guarantees the previous sync point
+ // that |mailbox_holders_release_cb_| guarantees the previous sync point
// occurred when it waits on |release_sync_point_|.
if (release_sync_point_)
client->WaitSyncPoint(release_sync_point_);
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index ca2a2e28e0e..7312c1136ea 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -11,21 +11,17 @@
#include "base/md5.h"
#include "base/memory/shared_memory.h"
#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/buffers.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
+#include "media/base/video_frame_metadata.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
#if defined(OS_MACOSX)
#include <CoreVideo/CVPixelBuffer.h>
#include "base/mac/scoped_cftyperef.h"
#endif
-class SkBitmap;
-
-namespace gpu {
-struct MailboxHolder;
-} // namespace gpu
-
namespace media {
class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
@@ -40,6 +36,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
kMaxPlanes = 4,
kYPlane = 0,
+ kARGBPlane = kYPlane,
kUPlane = 1,
kUVPlane = kUPlane,
kVPlane = 2,
@@ -52,18 +49,29 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Logged to UMA, so never reuse values.
enum Format {
UNKNOWN = 0, // Unknown format value.
- YV12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
- YV16 = 2, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
- I420 = 3, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
- YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+ YV12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
+ YV16 = 2, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
+ I420 = 3, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
+ YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
#if defined(VIDEO_HOLE)
- HOLE = 5, // Hole frame.
+ HOLE = 5, // Hole frame.
#endif // defined(VIDEO_HOLE)
NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
YV12J = 7, // JPEG color range version of YV12
NV12 = 8, // 12bpp 1x1 Y plane followed by an interleaved 2x2 UV plane.
YV24 = 9, // 24bpp YUV planar, no subsampling.
- FORMAT_MAX = YV24, // Must always be equal to largest entry logged.
+ ARGB = 10, // 32bpp ARGB, 1 plane.
+ YV12HD = 11, // Rec709 "HD" color space version of YV12
+ // Please update UMA histogram enumeration when adding new formats here.
+ FORMAT_MAX = YV12HD, // Must always be equal to largest entry logged.
+ };
+
+ // Defines the internal format and the number of the textures in the mailbox
+ // holders.
+ enum TextureFormat {
+ TEXTURE_RGBA, // One RGBA texture.
+ TEXTURE_RGB, // One RGB texture.
+ TEXTURE_YUV_420, // 3 RED textures one per channel. UV are 2x2 subsampled.
};
// Returns the name of a Format as a string.
@@ -85,39 +93,42 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Call prior to CreateFrame to ensure validity of frame configuration. Called
// automatically by VideoDecoderConfig::IsValidConfig().
// TODO(scherkus): VideoDecoderConfig shouldn't call this method
- static bool IsValidConfig(Format format, const gfx::Size& coded_size,
+ static bool IsValidConfig(Format format,
+ const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size);
- // CB to write pixels from the texture backing this frame into the
- // |const SkBitmap&| parameter.
- typedef base::Callback<void(const SkBitmap&)> ReadPixelsCB;
-
// CB to be called on the mailbox backing this frame when the frame is
// destroyed.
typedef base::Callback<void(uint32)> ReleaseMailboxCB;
- // Wraps a native texture of the given parameters with a VideoFrame. The
- // backing of the VideoFrame is held in the mailbox held by |mailbox_holder|,
- // and |mailbox_holder_release_cb| will be called with |mailbox_holder| as the
- // argument when the VideoFrame is to be destroyed.
- // |read_pixels_cb| may be used to do (slow!) readbacks from the
- // texture to main memory.
+ // Wraps a native texture of the given parameters with a VideoFrame.
+ // The backing of the VideoFrame is held in the mailbox held by
+ // |mailbox_holder|, and |mailbox_holder_release_cb| will be called with
+ // a syncpoint as the argument when the VideoFrame is to be destroyed.
static scoped_refptr<VideoFrame> WrapNativeTexture(
- scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const gpu::MailboxHolder& mailbox_holder,
const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb);
+ bool allow_overlay,
+ bool has_alpha);
-#if !defined(MEDIA_FOR_CAST_IOS)
- // Read pixels from the native texture backing |*this| and write
- // them to |pixels| as BGRA. |pixels| must point to a buffer at
- // least as large as 4 * visible_rect().size().GetArea().
- void ReadPixelsFromNativeTexture(const SkBitmap& pixels);
-#endif
+ // Wraps a set of native textures representing YUV data with a VideoFrame.
+ // |mailbox_holders_release_cb| will be called with a syncpoint as the
+ // argument when the VideoFrame is to be destroyed.
+ static scoped_refptr<VideoFrame> WrapYUV420NativeTextures(
+ const gpu::MailboxHolder& y_mailbox_holder,
+ const gpu::MailboxHolder& u_mailbox_holder,
+ const gpu::MailboxHolder& v_mailbox_holder,
+ const ReleaseMailboxCB& mailbox_holders_release_cb,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ bool allow_overlay);
// Wraps packed image data residing in a memory buffer with a VideoFrame.
// The image data resides in |data| and is assumed to be packed tightly in a
@@ -134,6 +145,24 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
uint8* data,
size_t data_size,
base::SharedMemoryHandle handle,
+ size_t shared_memory_offset,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb);
+
+ // Wraps external YUV data of the given parameters with a VideoFrame.
+ // The returned VideoFrame does not own the data passed in. When the frame
+ // is destroyed |no_longer_needed_cb.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapExternalYuvData(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ int32 y_stride,
+ int32 u_stride,
+ int32 v_stride,
+ uint8* y_data,
+ uint8* u_data,
+ uint8* v_data,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb);
@@ -173,25 +202,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp);
#endif
- // Wraps external YUV data of the given parameters with a VideoFrame.
- // The returned VideoFrame does not own the data passed in. When the frame
- // is destroyed |no_longer_needed_cb.Run()| will be called.
- // TODO(sheu): merge this into WrapExternalSharedMemory().
- // http://crbug.com/270217
- static scoped_refptr<VideoFrame> WrapExternalYuvData(
- Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- int32 y_stride,
- int32 u_stride,
- int32 v_stride,
- uint8* y_data,
- uint8* u_data,
- uint8* v_data,
- base::TimeDelta timestamp,
- const base::Closure& no_longer_needed_cb);
-
// Wraps |frame| and calls |no_longer_needed_cb| when the wrapper VideoFrame
// gets destroyed. |visible_rect| must be a sub rect within
// frame->visible_rect().
@@ -226,6 +236,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
static size_t NumPlanes(Format format);
+ static size_t NumTextures(TextureFormat texture_format);
+
// Returns the required allocation size for a (tightly packed) frame of the
// given coded size and format.
static size_t AllocationSize(Format format, const gfx::Size& coded_size);
@@ -245,6 +257,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Returns horizontal bits per pixel for given |plane| and |format|.
static int PlaneHorizontalBitsPerPixel(Format format, size_t plane);
+ // Returns bits per pixel for given |plane| and |format|.
+ static int PlaneBitsPerPixel(Format format, size_t plane);
+
// Returns the number of bytes per row for the given plane, format, and width.
// The width may be aligned to format requirements.
static size_t RowBytes(size_t plane, Format format, int width);
@@ -259,6 +274,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
Format format() const { return format_; }
+ TextureFormat texture_format() const { return texture_format_; }
+
const gfx::Size& coded_size() const { return coded_size_; }
const gfx::Rect& visible_rect() const { return visible_rect_; }
const gfx::Size& natural_size() const { return natural_size_; }
@@ -284,14 +301,28 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const uint8* visible_data(size_t plane) const;
uint8* visible_data(size_t plane);
- // Returns the mailbox holder of the native texture wrapped by this frame.
+ // Returns a mailbox holder for a given texture.
// Only valid to call if this is a NATIVE_TEXTURE frame. Before using the
// mailbox, the caller must wait for the included sync point.
- const gpu::MailboxHolder* mailbox_holder() const;
+ const gpu::MailboxHolder& mailbox_holder(size_t texture) const;
// Returns the shared-memory handle, if present
base::SharedMemoryHandle shared_memory_handle() const;
+ // Returns the offset into the shared memory where the frame data begins.
+ size_t shared_memory_offset() const;
+
+ // Returns a dictionary of optional metadata. This contains information
+ // associated with the frame that downstream clients might use for frame-level
+ // logging, quality/performance optimizations, signaling, etc.
+ //
+ // TODO(miu): Move some of the "extra" members of VideoFrame (below) into
+ // here as a later clean-up step.
+ const VideoFrameMetadata* metadata() const { return &metadata_; }
+ VideoFrameMetadata* metadata() { return &metadata_; }
+
+ bool allow_overlay() const { return allow_overlay_; }
+
#if defined(OS_POSIX)
// Returns backing dmabuf file descriptor for given |plane|, if present.
int dmabuf_fd(size_t plane) const;
@@ -341,7 +372,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const gpu::MailboxHolder(&mailbox_holders)[kMaxPlanes],
+ TextureFormat texture_format,
base::TimeDelta timestamp,
bool end_of_stream);
virtual ~VideoFrame();
@@ -351,6 +383,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Frame format.
const Format format_;
+ // Format of the native textures associated with this frame.
+ const TextureFormat texture_format_;
+
// Width and height of the video frame, in pixels. This must include pixel
// data for the whole image; i.e. for YUV formats with subsampled chroma
// planes, in the case that the visible portion of the image does not line up
@@ -375,14 +410,16 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Array of data pointers to each plane.
uint8* data_[kMaxPlanes];
- // Native texture mailbox, if this is a NATIVE_TEXTURE frame.
- const scoped_ptr<gpu::MailboxHolder> mailbox_holder_;
- ReleaseMailboxCB mailbox_holder_release_cb_;
- ReadPixelsCB read_pixels_cb_;
+ // Native texture mailboxes, if this is a NATIVE_TEXTURE frame.
+ gpu::MailboxHolder mailbox_holders_[kMaxPlanes];
+ ReleaseMailboxCB mailbox_holders_release_cb_;
// Shared memory handle, if this frame was allocated from shared memory.
base::SharedMemoryHandle shared_memory_handle_;
+ // Offset in shared memory buffer.
+ size_t shared_memory_offset_;
+
#if defined(OS_POSIX)
// Dmabufs for each plane, if this frame is wrapping memory
// acquired via dmabuf.
@@ -403,6 +440,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const bool end_of_stream_;
+ VideoFrameMetadata metadata_;
+
+ bool allow_overlay_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
};
diff --git a/chromium/media/base/video_frame_metadata.cc b/chromium/media/base/video_frame_metadata.cc
new file mode 100644
index 00000000000..d14bbe9a318
--- /dev/null
+++ b/chromium/media/base/video_frame_metadata.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/base/video_frame_metadata.h"
+
+namespace media {
+
+namespace {
+
+// Map enum key to internal std::string key used by base::DictionaryValue.
+inline std::string ToInternalKey(VideoFrameMetadata::Key key) {
+ DCHECK_LT(key, VideoFrameMetadata::NUM_KEYS);
+ return base::IntToString(static_cast<int>(key));
+}
+
+} // namespace
+
+VideoFrameMetadata::VideoFrameMetadata() {}
+
+VideoFrameMetadata::~VideoFrameMetadata() {}
+
+bool VideoFrameMetadata::HasKey(Key key) const {
+ return dictionary_.HasKey(ToInternalKey(key));
+}
+
+void VideoFrameMetadata::SetBoolean(Key key, bool value) {
+ dictionary_.SetBooleanWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+void VideoFrameMetadata::SetInteger(Key key, int value) {
+ dictionary_.SetIntegerWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+void VideoFrameMetadata::SetDouble(Key key, double value) {
+ dictionary_.SetDoubleWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+void VideoFrameMetadata::SetString(Key key, const std::string& value) {
+ dictionary_.SetWithoutPathExpansion(
+ ToInternalKey(key),
+ // Using BinaryValue since we don't want the |value| interpreted as having
+ // any particular character encoding (e.g., UTF-8) by
+ // base::DictionaryValue.
+ base::BinaryValue::CreateWithCopiedBuffer(value.data(), value.size()));
+}
+
+void VideoFrameMetadata::SetTimeTicks(Key key, const base::TimeTicks& value) {
+ const int64 internal_value = value.ToInternalValue();
+ dictionary_.SetWithoutPathExpansion(
+ ToInternalKey(key),
+ base::BinaryValue::CreateWithCopiedBuffer(
+ reinterpret_cast<const char*>(&internal_value),
+ sizeof(internal_value)));
+}
+
+void VideoFrameMetadata::SetValue(Key key, scoped_ptr<base::Value> value) {
+ dictionary_.SetWithoutPathExpansion(ToInternalKey(key), value.Pass());
+}
+
+bool VideoFrameMetadata::GetBoolean(Key key, bool* value) const {
+ DCHECK(value);
+ return dictionary_.GetBooleanWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+bool VideoFrameMetadata::GetInteger(Key key, int* value) const {
+ DCHECK(value);
+ return dictionary_.GetIntegerWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+bool VideoFrameMetadata::GetDouble(Key key, double* value) const {
+ DCHECK(value);
+ return dictionary_.GetDoubleWithoutPathExpansion(ToInternalKey(key), value);
+}
+
+bool VideoFrameMetadata::GetString(Key key, std::string* value) const {
+ DCHECK(value);
+ const base::BinaryValue* const binary_value = GetBinaryValue(key);
+ if (binary_value)
+ value->assign(binary_value->GetBuffer(), binary_value->GetSize());
+ return !!binary_value;
+}
+
+bool VideoFrameMetadata::GetTimeTicks(Key key, base::TimeTicks* value) const {
+ DCHECK(value);
+ const base::BinaryValue* const binary_value = GetBinaryValue(key);
+ if (binary_value && binary_value->GetSize() == sizeof(int64)) {
+ int64 internal_value;
+ memcpy(&internal_value, binary_value->GetBuffer(), sizeof(internal_value));
+ *value = base::TimeTicks::FromInternalValue(internal_value);
+ return true;
+ }
+ return false;
+}
+
+const base::Value* VideoFrameMetadata::GetValue(Key key) const {
+ const base::Value* result = nullptr;
+ if (!dictionary_.GetWithoutPathExpansion(ToInternalKey(key), &result))
+ return nullptr;
+ return result;
+}
+
+void VideoFrameMetadata::MergeInternalValuesInto(
+ base::DictionaryValue* out) const {
+ out->MergeDictionary(&dictionary_);
+}
+
+void VideoFrameMetadata::MergeInternalValuesFrom(
+ const base::DictionaryValue& in) {
+ dictionary_.MergeDictionary(&in);
+}
+
+const base::BinaryValue* VideoFrameMetadata::GetBinaryValue(Key key) const {
+ const base::Value* internal_value = nullptr;
+ if (dictionary_.GetWithoutPathExpansion(ToInternalKey(key),
+ &internal_value) &&
+ internal_value->GetType() == base::Value::TYPE_BINARY) {
+ return static_cast<const base::BinaryValue*>(internal_value);
+ }
+ return nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_frame_metadata.h b/chromium/media/base/video_frame_metadata.h
new file mode 100644
index 00000000000..31fbe749892
--- /dev/null
+++ b/chromium/media/base/video_frame_metadata.h
@@ -0,0 +1,70 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_FRAME_METADATA_H_
+#define MEDIA_BASE_VIDEO_FRAME_METADATA_H_
+
+#include "base/compiler_specific.h"
+#include "base/time/time.h"
+#include "base/values.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT VideoFrameMetadata {
+ public:
+ enum Key {
+ // Video capture begin/end timestamps. Consumers can use these values for
+ // dynamic optimizations, logging stats, etc. Use Get/SetTimeTicks() for
+ // these keys.
+ CAPTURE_BEGIN_TIME,
+ CAPTURE_END_TIME,
+
+ // Represents either the fixed frame rate, or the maximum frame rate to
+ // expect from a variable-rate source. Use Get/SetDouble() for this key.
+ FRAME_RATE,
+
+ NUM_KEYS
+ };
+
+ VideoFrameMetadata();
+ ~VideoFrameMetadata();
+
+ bool HasKey(Key key) const;
+
+ void Clear() { dictionary_.Clear(); }
+
+ // Setters. Overwrites existing value, if present.
+ void SetBoolean(Key key, bool value);
+ void SetInteger(Key key, int value);
+ void SetDouble(Key key, double value);
+ void SetString(Key key, const std::string& value);
+ void SetTimeTicks(Key key, const base::TimeTicks& value);
+ void SetValue(Key key, scoped_ptr<base::Value> value);
+
+ // Getters. Returns true if |key| was present and has the value has been set.
+ bool GetBoolean(Key key, bool* value) const WARN_UNUSED_RESULT;
+ bool GetInteger(Key key, int* value) const WARN_UNUSED_RESULT;
+ bool GetDouble(Key key, double* value) const WARN_UNUSED_RESULT;
+ bool GetString(Key key, std::string* value) const WARN_UNUSED_RESULT;
+ bool GetTimeTicks(Key key, base::TimeTicks* value) const WARN_UNUSED_RESULT;
+
+ // Returns null if |key| was not present.
+ const base::Value* GetValue(Key key) const WARN_UNUSED_RESULT;
+
+ // For serialization.
+ void MergeInternalValuesInto(base::DictionaryValue* out) const;
+ void MergeInternalValuesFrom(const base::DictionaryValue& in);
+
+ private:
+ const base::BinaryValue* GetBinaryValue(Key key) const;
+
+ base::DictionaryValue dictionary_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameMetadata);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_FRAME_METADATA_H_
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 38485c2e1ce..e22a8184228 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -252,14 +252,15 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(
- new gpu::MailboxHolder(gpu::Mailbox(), 5, 0 /* sync_point */)),
+ gpu::MailboxHolder(gpu::Mailbox(), 5, 0 /* sync_point */),
base::Bind(&TextureCallback, &called_sync_point),
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- VideoFrame::ReadPixelsCB()); // read_pixels_cb
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ false, // allow_overlay
+ true); // has_alpha
+ EXPECT_EQ(VideoFrame::TEXTURE_RGBA, frame->texture_format());
}
// Nobody set a sync point to |frame|, so |frame| set |called_sync_point| to 0
// as default value.
@@ -284,33 +285,44 @@ class SyncPointClientImpl : public VideoFrame::SyncPointClient {
// Verify the gpu::MailboxHolder::ReleaseCallback is called when VideoFrame is
// destroyed with the release sync point, which was updated by clients.
// (i.e. the compositor, webgl).
-TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
- gpu::Mailbox mailbox;
- mailbox.name[0] = 50;
+TEST(VideoFrame,
+ TexturesNoLongerNeededCallbackAfterTakingAndReleasingMailboxes) {
+ const int kPlanesNum = 3;
+ gpu::Mailbox mailbox[kPlanesNum];
+ for (int i = 0; i < kPlanesNum; ++i) {
+ mailbox[i].name[0] = 50 + 1;
+ }
+
uint32 sync_point = 7;
uint32 target = 9;
uint32 release_sync_point = 111;
uint32 called_sync_point = 0;
-
{
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new gpu::MailboxHolder(mailbox, target, sync_point)),
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapYUV420NativeTextures(
+ gpu::MailboxHolder(mailbox[VideoFrame::kYPlane], target, sync_point),
+ gpu::MailboxHolder(mailbox[VideoFrame::kUPlane], target, sync_point),
+ gpu::MailboxHolder(mailbox[VideoFrame::kVPlane], target, sync_point),
base::Bind(&TextureCallback, &called_sync_point),
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- VideoFrame::ReadPixelsCB()); // read_pixels_cb
-
- const gpu::MailboxHolder* mailbox_holder = frame->mailbox_holder();
-
- EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox.name[0]);
- EXPECT_EQ(target, mailbox_holder->texture_target);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point);
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ false); // allow_overlay
+
+ EXPECT_EQ(VideoFrame::TEXTURE_YUV_420, frame->texture_format());
+ EXPECT_EQ(3u, VideoFrame::NumTextures(frame->texture_format()));
+ for (size_t i = 0; i < VideoFrame::NumTextures(frame->texture_format());
+ ++i) {
+ const gpu::MailboxHolder& mailbox_holder = frame->mailbox_holder(i);
+ EXPECT_EQ(mailbox[i].name[0], mailbox_holder.mailbox.name[0]);
+ EXPECT_EQ(target, mailbox_holder.texture_target);
+ EXPECT_EQ(sync_point, mailbox_holder.sync_point);
+ }
SyncPointClientImpl client(release_sync_point);
frame->UpdateReleaseSyncPoint(&client);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point);
+ EXPECT_EQ(sync_point,
+ frame->mailbox_holder(VideoFrame::kYPlane).sync_point);
}
EXPECT_EQ(release_sync_point, called_sync_point);
}
@@ -328,4 +340,82 @@ TEST(VideoFrame, ZeroInitialized) {
EXPECT_EQ(0, frame->data(i)[0]);
}
+TEST(VideoFrameMetadata, SetAndThenGetAllKeysForAllTypes) {
+ VideoFrameMetadata metadata;
+
+ for (int i = 0; i < VideoFrameMetadata::NUM_KEYS; ++i) {
+ const VideoFrameMetadata::Key key = static_cast<VideoFrameMetadata::Key>(i);
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetBoolean(key, true);
+ EXPECT_TRUE(metadata.HasKey(key));
+ bool bool_value = false;
+ EXPECT_TRUE(metadata.GetBoolean(key, &bool_value));
+ EXPECT_EQ(true, bool_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetInteger(key, i);
+ EXPECT_TRUE(metadata.HasKey(key));
+ int int_value = -999;
+ EXPECT_TRUE(metadata.GetInteger(key, &int_value));
+ EXPECT_EQ(i, int_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetDouble(key, 3.14 * i);
+ EXPECT_TRUE(metadata.HasKey(key));
+ double double_value = -999.99;
+ EXPECT_TRUE(metadata.GetDouble(key, &double_value));
+ EXPECT_EQ(3.14 * i, double_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetString(key, base::StringPrintf("\xfe%d\xff", i));
+ EXPECT_TRUE(metadata.HasKey(key));
+ std::string string_value;
+ EXPECT_TRUE(metadata.GetString(key, &string_value));
+ EXPECT_EQ(base::StringPrintf("\xfe%d\xff", i), string_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetTimeTicks(key, base::TimeTicks::FromInternalValue(~(0LL) + i));
+ EXPECT_TRUE(metadata.HasKey(key));
+ base::TimeTicks ticks_value;
+ EXPECT_TRUE(metadata.GetTimeTicks(key, &ticks_value));
+ EXPECT_EQ(base::TimeTicks::FromInternalValue(~(0LL) + i), ticks_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetValue(key, base::Value::CreateNullValue());
+ EXPECT_TRUE(metadata.HasKey(key));
+ const base::Value* const null_value = metadata.GetValue(key);
+ EXPECT_TRUE(null_value);
+ EXPECT_EQ(base::Value::TYPE_NULL, null_value->GetType());
+ metadata.Clear();
+ }
+}
+
+TEST(VideoFrameMetadata, PassMetadataViaIntermediary) {
+ VideoFrameMetadata expected;
+ for (int i = 0; i < VideoFrameMetadata::NUM_KEYS; ++i) {
+ const VideoFrameMetadata::Key key = static_cast<VideoFrameMetadata::Key>(i);
+ expected.SetInteger(key, i);
+ }
+
+ base::DictionaryValue tmp;
+ expected.MergeInternalValuesInto(&tmp);
+ EXPECT_EQ(static_cast<size_t>(VideoFrameMetadata::NUM_KEYS), tmp.size());
+
+ VideoFrameMetadata result;
+ result.MergeInternalValuesFrom(tmp);
+
+ for (int i = 0; i < VideoFrameMetadata::NUM_KEYS; ++i) {
+ const VideoFrameMetadata::Key key = static_cast<VideoFrameMetadata::Key>(i);
+ int value = -1;
+ EXPECT_TRUE(result.GetInteger(key, &value));
+ EXPECT_EQ(i, value);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/base/video_renderer.h b/chromium/media/base/video_renderer.h
index fc544bc2931..454c6644127 100644
--- a/chromium/media/base/video_renderer.h
+++ b/chromium/media/base/video_renderer.h
@@ -9,18 +9,21 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/buffering_state.h"
+#include "media/base/decryptor.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
+#include "media/base/time_source.h"
namespace media {
class DemuxerStream;
class VideoDecoder;
+class VideoFrame;
class MEDIA_EXPORT VideoRenderer {
public:
- // Used to query the current time or duration of the media.
- typedef base::Callback<base::TimeDelta()> TimeDeltaCB;
+ // Used to paint VideoFrame.
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
VideoRenderer();
@@ -28,7 +31,11 @@ class MEDIA_EXPORT VideoRenderer {
virtual ~VideoRenderer();
// Initializes a VideoRenderer with |stream|, executing |init_cb| upon
- // completion.
+ // completion. If initialization fails, only |init_cb| (not |error_cb|) will
+ // be called.
+ //
+ // |set_decryptor_ready_cb| is fired when a Decryptor is needed, i.e. when the
+ // |stream| is encrypted.
//
// |statistics_cb| is executed periodically with video rendering stats, such
// as dropped frames.
@@ -38,17 +45,23 @@ class MEDIA_EXPORT VideoRenderer {
//
// |ended_cb| is executed when video rendering has reached the end of stream.
//
- // |error_cb| is executed if an error was encountered.
+ // |error_cb| is executed if an error was encountered after initialization.
+ //
+ // |wall_clock_time_cb| is used to convert media timestamps into wallclock
+ // timestamps.
//
- // |get_time_cb| is used to query the current media playback time.
- virtual void Initialize(DemuxerStream* stream,
- bool low_delay,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb) = 0;
+ // |waiting_for_decryption_key_cb| is executed whenever the key needed to
+ // decrypt the stream is not available.
+ virtual void Initialize(
+ DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb,
+ const base::Closure& waiting_for_decryption_key_cb) = 0;
// Discards any video data and stops reading from |stream|, executing
// |callback| when completed.
@@ -63,6 +76,11 @@ class MEDIA_EXPORT VideoRenderer {
// Only valid to call after a successful Initialize() or Flush().
virtual void StartPlayingFrom(base::TimeDelta timestamp) = 0;
+ // Called when time starts or stops moving. Time progresses when a base time
+ // has been set and the playback rate is > 0. If either condition changes,
+ // |time_progressing| will be false.
+ virtual void OnTimeStateChanged(bool time_progressing) = 0;
+
private:
DISALLOW_COPY_AND_ASSIGN(VideoRenderer);
};
diff --git a/chromium/media/base/video_renderer_sink.h b/chromium/media/base/video_renderer_sink.h
new file mode 100644
index 00000000000..5a51a73fc92
--- /dev/null
+++ b/chromium/media/base/video_renderer_sink.h
@@ -0,0 +1,73 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_RENDERER_SINK_H_
+#define MEDIA_BASE_VIDEO_RENDERER_SINK_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+// VideoRendererSink is an interface representing the end-point for rendered
+// video frames. An implementation is expected to periodically call Render() on
+// a callback object.
+class MEDIA_EXPORT VideoRendererSink {
+ public:
+ class RenderCallback {
+ public:
+ // Returns a VideoFrame for rendering which should be displayed within the
+ // presentation interval [|deadline_min|, |deadline_max|]. Returns NULL if
+ // no frame or no new frame (since the last Render() call) is available for
+ // rendering within the requested interval. Intervals are expected to be
+ // regular, contiguous, and monotonically increasing. Irregular intervals
+ // may affect the rendering decisions made by the underlying callback.
+ //
+ // If |background_rendering| is true, the VideoRenderSink is pumping
+ // callbacks at a lower frequency than normal and the results of the
+ // Render() call may not be used.
+ virtual scoped_refptr<VideoFrame> Render(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ bool background_rendering) = 0;
+
+ // Called by the sink when a VideoFrame previously returned via Render() was
+ // not actually rendered. Must be called before the next Render() call.
+ virtual void OnFrameDropped() = 0;
+
+ virtual ~RenderCallback() {}
+ };
+
+ // Starts video rendering. See RenderCallback for more details. Must be
+ // called to receive Render() callbacks. Callbacks may start immediately, so
+ // |callback| must be ready to receive callbacks before calling Start().
+ virtual void Start(RenderCallback* callback) = 0;
+
+ // Stops video rendering, waits for any outstanding calls to the |callback|
+ // given to Start() to complete before returning. No new calls to |callback|
+ // will be issued after this method returns. May be used as a means of power
+ // conservation by the sink implementation, so clients should call this
+ // liberally if no new frames are expected.
+ virtual void Stop() = 0;
+
+ // Instead of using a callback driven rendering path, allow clients to paint
+ // frames as they see fit without regard for the compositor.
+ // TODO(dalecurtis): This should be nuked once experiments show the new path
+ // is amazing and the old path is not! http://crbug.com/439548
+ virtual void PaintFrameUsingOldRenderingPath(
+ const scoped_refptr<VideoFrame>& frame) = 0;
+
+ // TODO(dalecurtis): We may need OnSizeChanged() and OnOpacityChanged()
+ // methods on this interface if background rendering is handled inside of
+ // the media layer instead of by VideoFrameCompositor.
+
+ virtual ~VideoRendererSink() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_RENDERER_SINK_H_
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
index d7946743eae..e04a5a3c543 100644
--- a/chromium/media/base/video_util.cc
+++ b/chromium/media/base/video_util.cc
@@ -270,6 +270,23 @@ void RotatePlaneByPixels(
}
}
+// Common logic for the letterboxing and scale-within/scale-encompassing
+// functions. Scales |size| to either fit within or encompass |target|,
+// depending on whether |fit_within_target| is true.
+static gfx::Size ScaleSizeToTarget(const gfx::Size& size,
+ const gfx::Size& target,
+ bool fit_within_target) {
+ if (size.IsEmpty())
+ return gfx::Size(); // Corner case: Aspect ratio is undefined.
+
+ const int64 x = static_cast<int64>(size.width()) * target.height();
+ const int64 y = static_cast<int64>(size.height()) * target.width();
+ const bool use_target_width = fit_within_target ? (y < x) : (x < y);
+ return use_target_width ?
+ gfx::Size(target.width(), static_cast<int>(y / size.width())) :
+ gfx::Size(static_cast<int>(x / size.height()), target.height());
+}
+
gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
const gfx::Size& content) {
// If |content| has an undefined aspect ratio, let's not try to divide by
@@ -277,19 +294,33 @@ gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
if (content.IsEmpty())
return gfx::Rect();
- int64 x = static_cast<int64>(content.width()) * bounds.height();
- int64 y = static_cast<int64>(content.height()) * bounds.width();
-
- gfx::Size letterbox(bounds.width(), bounds.height());
- if (y < x)
- letterbox.set_height(static_cast<int>(y / content.width()));
- else
- letterbox.set_width(static_cast<int>(x / content.height()));
gfx::Rect result = bounds;
- result.ClampToCenteredSize(letterbox);
+ result.ClampToCenteredSize(ScaleSizeToTarget(content, bounds.size(), true));
return result;
}
+gfx::Size ScaleSizeToFitWithinTarget(const gfx::Size& size,
+ const gfx::Size& target) {
+ return ScaleSizeToTarget(size, target, true);
+}
+
+gfx::Size ScaleSizeToEncompassTarget(const gfx::Size& size,
+ const gfx::Size& target) {
+ return ScaleSizeToTarget(size, target, false);
+}
+
+gfx::Size PadToMatchAspectRatio(const gfx::Size& size,
+ const gfx::Size& target) {
+ if (target.IsEmpty())
+ return gfx::Size(); // Aspect ratio is undefined.
+
+ const int64 x = static_cast<int64>(size.width()) * target.height();
+ const int64 y = static_cast<int64>(size.height()) * target.width();
+ if (x < y)
+ return gfx::Size(static_cast<int>(y / target.height()), size.height());
+ return gfx::Size(size.width(), static_cast<int>(x / target.width()));
+}
+
void CopyRGBToVideoFrame(const uint8* source,
int stride,
const gfx::Rect& region_in_frame,
diff --git a/chromium/media/base/video_util.h b/chromium/media/base/video_util.h
index 702e620dc75..abbcad41abf 100644
--- a/chromium/media/base/video_util.h
+++ b/chromium/media/base/video_util.h
@@ -7,8 +7,8 @@
#include "base/basictypes.h"
#include "media/base/media_export.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -84,6 +84,29 @@ MEDIA_EXPORT void RotatePlaneByPixels(
MEDIA_EXPORT gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
const gfx::Size& content);
+// Return a scaled |size| whose area is less than or equal to |target|, where
+// one of its dimensions is equal to |target|'s. The aspect ratio of |size| is
+// preserved as closely as possible. If |size| is empty, the result will be
+// empty.
+MEDIA_EXPORT gfx::Size ScaleSizeToFitWithinTarget(const gfx::Size& size,
+ const gfx::Size& target);
+
+// Return a scaled |size| whose area is greater than or equal to |target|, where
+// one of its dimensions is equal to |target|'s. The aspect ratio of |size| is
+// preserved as closely as possible. If |size| is empty, the result will be
+// empty.
+MEDIA_EXPORT gfx::Size ScaleSizeToEncompassTarget(const gfx::Size& size,
+ const gfx::Size& target);
+
+// Returns |size| with only one of its dimensions increased such that the result
+// matches the aspect ratio of |target|. This is different from
+// ScaleSizeToEncompassTarget() in two ways: 1) The goal is to match the aspect
+// ratio of |target| rather than that of |size|. 2) Only one of the dimensions
+// of |size| may change, and it may only be increased (padded). If either
+// |size| or |target| is empty, the result will be empty.
+MEDIA_EXPORT gfx::Size PadToMatchAspectRatio(const gfx::Size& size,
+ const gfx::Size& target);
+
// Copy an RGB bitmap into the specified |region_in_frame| of a YUV video frame.
// Fills the regions outside |region_in_frame| with black.
MEDIA_EXPORT void CopyRGBToVideoFrame(const uint8* source,
diff --git a/chromium/media/base/video_util_unittest.cc b/chromium/media/base/video_util_unittest.cc
index 9ac13c1f610..79c53159ab8 100644
--- a/chromium/media/base/video_util_unittest.cc
+++ b/chromium/media/base/video_util_unittest.cc
@@ -328,6 +328,8 @@ TEST_P(VideoUtilRotationTest, Rotate) {
INSTANTIATE_TEST_CASE_P(, VideoUtilRotationTest,
testing::ValuesIn(kVideoRotationTestData));
+// Tests the ComputeLetterboxRegion function. Also, because of shared code
+// internally, this also tests ScaleSizeToFitWithinTarget().
TEST_F(VideoUtilTest, ComputeLetterboxRegion) {
EXPECT_EQ(gfx::Rect(167, 0, 666, 500),
ComputeLetterboxRegion(gfx::Rect(0, 0, 1000, 500),
@@ -348,6 +350,48 @@ TEST_F(VideoUtilTest, ComputeLetterboxRegion) {
gfx::Size(0, 0)).IsEmpty());
}
+TEST_F(VideoUtilTest, ScaleSizeToEncompassTarget) {
+ EXPECT_EQ(gfx::Size(1000, 750),
+ ScaleSizeToEncompassTarget(gfx::Size(640, 480),
+ gfx::Size(1000, 500)));
+ EXPECT_EQ(gfx::Size(1333, 1000),
+ ScaleSizeToEncompassTarget(gfx::Size(640, 480),
+ gfx::Size(500, 1000)));
+ EXPECT_EQ(gfx::Size(1000, 562),
+ ScaleSizeToEncompassTarget(gfx::Size(1920, 1080),
+ gfx::Size(1000, 500)));
+ EXPECT_EQ(gfx::Size(133, 100),
+ ScaleSizeToEncompassTarget(gfx::Size(400, 300),
+ gfx::Size(100, 100)));
+ EXPECT_EQ(gfx::Size(2666666666, 2000000000),
+ ScaleSizeToEncompassTarget(gfx::Size(40000, 30000),
+ gfx::Size(2000000000, 2000000000)));
+ EXPECT_TRUE(ScaleSizeToEncompassTarget(
+ gfx::Size(0, 0), gfx::Size(2000000000, 2000000000)).IsEmpty());
+}
+
+TEST_F(VideoUtilTest, PadToMatchAspectRatio) {
+ EXPECT_EQ(gfx::Size(640, 480),
+ PadToMatchAspectRatio(gfx::Size(640, 480), gfx::Size(640, 480)));
+ EXPECT_EQ(gfx::Size(640, 480),
+ PadToMatchAspectRatio(gfx::Size(640, 480), gfx::Size(4, 3)));
+ EXPECT_EQ(gfx::Size(960, 480),
+ PadToMatchAspectRatio(gfx::Size(640, 480), gfx::Size(1000, 500)));
+ EXPECT_EQ(gfx::Size(640, 1280),
+ PadToMatchAspectRatio(gfx::Size(640, 480), gfx::Size(500, 1000)));
+ EXPECT_EQ(gfx::Size(2160, 1080),
+ PadToMatchAspectRatio(gfx::Size(1920, 1080), gfx::Size(1000, 500)));
+ EXPECT_EQ(gfx::Size(400, 400),
+ PadToMatchAspectRatio(gfx::Size(400, 300), gfx::Size(100, 100)));
+ EXPECT_EQ(gfx::Size(400, 400),
+ PadToMatchAspectRatio(gfx::Size(300, 400), gfx::Size(100, 100)));
+ EXPECT_EQ(gfx::Size(40000, 40000),
+ PadToMatchAspectRatio(gfx::Size(40000, 30000),
+ gfx::Size(2000000000, 2000000000)));
+ EXPECT_TRUE(PadToMatchAspectRatio(
+ gfx::Size(40000, 30000), gfx::Size(0, 0)).IsEmpty());
+}
+
TEST_F(VideoUtilTest, LetterboxYUV) {
int width = 40;
int height = 30;
diff --git a/chromium/media/base/wall_clock_time_source.cc b/chromium/media/base/wall_clock_time_source.cc
index 408de93e05e..1c59be948bb 100644
--- a/chromium/media/base/wall_clock_time_source.cc
+++ b/chromium/media/base/wall_clock_time_source.cc
@@ -5,14 +5,11 @@
#include "media/base/wall_clock_time_source.h"
#include "base/logging.h"
-#include "base/time/default_tick_clock.h"
namespace media {
WallClockTimeSource::WallClockTimeSource()
- : tick_clock_(new base::DefaultTickClock()),
- ticking_(false),
- playback_rate_(1.0f) {
+ : tick_clock_(&default_tick_clock_), ticking_(false), playback_rate_(1.0) {
}
WallClockTimeSource::~WallClockTimeSource() {
@@ -35,7 +32,7 @@ void WallClockTimeSource::StopTicking() {
reference_wall_ticks_ = tick_clock_->NowTicks();
}
-void WallClockTimeSource::SetPlaybackRate(float playback_rate) {
+void WallClockTimeSource::SetPlaybackRate(double playback_rate) {
DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
base::AutoLock auto_lock(lock_);
// Estimate current media time using old rate to use as a new base time for
@@ -60,18 +57,27 @@ base::TimeDelta WallClockTimeSource::CurrentMediaTime() {
return CurrentMediaTime_Locked();
}
-base::TimeDelta WallClockTimeSource::CurrentMediaTimeForSyncingVideo() {
- return CurrentMediaTime();
-}
-
-void WallClockTimeSource::SetTickClockForTesting(
- scoped_ptr<base::TickClock> tick_clock) {
- tick_clock_.swap(tick_clock);
+bool WallClockTimeSource::GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) {
+ base::AutoLock auto_lock(lock_);
+ if (!ticking_ || !playback_rate_)
+ return false;
+
+ DCHECK(wall_clock_times->empty());
+ wall_clock_times->reserve(media_timestamps.size());
+ for (const auto& media_timestamp : media_timestamps) {
+ wall_clock_times->push_back(
+ reference_wall_ticks_ +
+ base::TimeDelta::FromMicroseconds(
+ (media_timestamp - base_time_).InMicroseconds() / playback_rate_));
+ }
+ return true;
}
base::TimeDelta WallClockTimeSource::CurrentMediaTime_Locked() {
lock_.AssertAcquired();
- if (!ticking_)
+ if (!ticking_ || !playback_rate_)
return base_time_;
base::TimeTicks now = tick_clock_->NowTicks();
diff --git a/chromium/media/base/wall_clock_time_source.h b/chromium/media/base/wall_clock_time_source.h
index c1aca3fba77..22283192c39 100644
--- a/chromium/media/base/wall_clock_time_source.h
+++ b/chromium/media/base/wall_clock_time_source.h
@@ -7,13 +7,10 @@
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
+#include "base/time/default_tick_clock.h"
#include "media/base/media_export.h"
#include "media/base/time_source.h"
-namespace base {
-class TickClock;
-}
-
namespace media {
// A time source that uses interpolation based on the system clock.
@@ -25,23 +22,32 @@ class MEDIA_EXPORT WallClockTimeSource : public TimeSource {
// TimeSource implementation.
void StartTicking() override;
void StopTicking() override;
- void SetPlaybackRate(float playback_rate) override;
+ void SetPlaybackRate(double playback_rate) override;
void SetMediaTime(base::TimeDelta time) override;
base::TimeDelta CurrentMediaTime() override;
- base::TimeDelta CurrentMediaTimeForSyncingVideo() override;
+ bool GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) override;
- void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
+ void set_tick_clock_for_testing(base::TickClock* tick_clock) {
+ tick_clock_ = tick_clock;
+ }
private:
base::TimeDelta CurrentMediaTime_Locked();
- scoped_ptr<base::TickClock> tick_clock_;
+ // Allow for an injectable tick clock for testing.
+ base::DefaultTickClock default_tick_clock_;
+
+ // If specified, used instead of |default_tick_clock_|.
+ base::TickClock* tick_clock_;
+
bool ticking_;
// While ticking we can interpolate the current media time by measuring the
// delta between our reference ticks and the current system ticks and scaling
// that time by the playback rate.
- float playback_rate_;
+ double playback_rate_;
base::TimeDelta base_time_;
base::TimeTicks reference_wall_ticks_;
diff --git a/chromium/media/base/wall_clock_time_source_unittest.cc b/chromium/media/base/wall_clock_time_source_unittest.cc
index 08a424d42a4..d9b632ffd2b 100644
--- a/chromium/media/base/wall_clock_time_source_unittest.cc
+++ b/chromium/media/base/wall_clock_time_source_unittest.cc
@@ -11,8 +11,8 @@ namespace media {
class WallClockTimeSourceTest : public testing::Test {
public:
WallClockTimeSourceTest() : tick_clock_(new base::SimpleTestTickClock()) {
- time_source_.SetTickClockForTesting(
- scoped_ptr<base::TickClock>(tick_clock_));
+ time_source_.set_tick_clock_for_testing(tick_clock_.get());
+ AdvanceTimeInSeconds(1);
}
~WallClockTimeSourceTest() override {}
@@ -28,36 +28,60 @@ class WallClockTimeSourceTest : public testing::Test {
return time_source_.SetMediaTime(base::TimeDelta::FromSeconds(seconds));
}
- WallClockTimeSource time_source_;
+ bool IsWallClockNowForMediaTimeInSeconds(int seconds) {
+ std::vector<base::TimeTicks> wall_clock_times;
+ EXPECT_TRUE(time_source_.GetWallClockTimes(
+ std::vector<base::TimeDelta>(1, base::TimeDelta::FromSeconds(seconds)),
+ &wall_clock_times));
+ return tick_clock_->NowTicks() == wall_clock_times.front();
+ }
- private:
- base::SimpleTestTickClock* tick_clock_; // Owned by |time_source_|.
+ bool IsTimeStopped() {
+ std::vector<base::TimeTicks> wall_clock_times;
+ // Convert any random value, it shouldn't matter for this call.
+ const bool time_stopped = !time_source_.GetWallClockTimes(
+ std::vector<base::TimeDelta>(1, base::TimeDelta::FromSeconds(1)),
+ &wall_clock_times);
+ EXPECT_EQ(time_stopped, wall_clock_times.empty());
+ return time_stopped;
+ }
+
+ protected:
+ WallClockTimeSource time_source_;
+ scoped_ptr<base::SimpleTestTickClock> tick_clock_;
DISALLOW_COPY_AND_ASSIGN(WallClockTimeSourceTest);
};
TEST_F(WallClockTimeSourceTest, InitialTimeIsZero) {
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
}
TEST_F(WallClockTimeSourceTest, InitialTimeIsNotTicking) {
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
AdvanceTimeInSeconds(100);
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
}
TEST_F(WallClockTimeSourceTest, InitialPlaybackRateIsOne) {
time_source_.StartTicking();
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(0));
AdvanceTimeInSeconds(100);
EXPECT_EQ(100, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(100));
}
TEST_F(WallClockTimeSourceTest, SetMediaTime) {
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
SetMediaTimeInSeconds(10);
EXPECT_EQ(10, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
}
TEST_F(WallClockTimeSourceTest, SetPlaybackRate) {
@@ -65,26 +89,33 @@ TEST_F(WallClockTimeSourceTest, SetPlaybackRate) {
time_source_.SetPlaybackRate(0.5);
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(0));
AdvanceTimeInSeconds(10);
EXPECT_EQ(5, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(5));
time_source_.SetPlaybackRate(2);
EXPECT_EQ(5, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(5));
AdvanceTimeInSeconds(10);
EXPECT_EQ(25, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(25));
}
TEST_F(WallClockTimeSourceTest, StopTicking) {
time_source_.StartTicking();
EXPECT_EQ(0, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(0));
AdvanceTimeInSeconds(10);
EXPECT_EQ(10, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsWallClockNowForMediaTimeInSeconds(10));
time_source_.StopTicking();
AdvanceTimeInSeconds(10);
EXPECT_EQ(10, CurrentMediaTimeInSeconds());
+ EXPECT_TRUE(IsTimeStopped());
}
} // namespace media
diff --git a/chromium/media/base/yuv_convert.cc b/chromium/media/base/yuv_convert.cc
index 431183abb39..9bc9a93197c 100644
--- a/chromium/media/base/yuv_convert.cc
+++ b/chromium/media/base/yuv_convert.cc
@@ -18,14 +18,16 @@
#include "media/base/yuv_convert.h"
#include "base/cpu.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "build/build_config.h"
#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/simd/convert_yuv_to_rgb.h"
#include "media/base/simd/filter_yuv.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
#if defined(ARCH_CPU_X86_FAMILY)
#if defined(COMPILER_MSVC)
@@ -85,7 +87,7 @@ typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
const uint8*,
uint8*,
ptrdiff_t,
- const int16[1024][4]);
+ const int16*);
typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
const uint8*,
@@ -93,7 +95,7 @@ typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
const uint8*,
uint8*,
ptrdiff_t,
- const int16[1024][4]);
+ const int16*);
typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
const uint8*,
@@ -101,7 +103,7 @@ typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
uint8*,
ptrdiff_t,
ptrdiff_t,
- const int16[1024][4]);
+ const int16*);
static FilterYUVRowsProc g_filter_yuv_rows_proc_ = NULL;
static ConvertYUVToRGB32RowProc g_convert_yuv_to_rgb32_row_proc_ = NULL;
@@ -112,6 +114,23 @@ static ConvertRGBToYUVProc g_convert_rgb24_to_yuv_proc_ = NULL;
static ConvertYUVToRGB32Proc g_convert_yuv_to_rgb32_proc_ = NULL;
static ConvertYUVAToARGBProc g_convert_yuva_to_argb_proc_ = NULL;
+static const int kYUVToRGBTableSize = 256 * 4 * 4 * sizeof(int16);
+
+// base::AlignedMemory has a private operator new(), so wrap it in a struct so
+// that we can put it in a LazyInstance::Leaky.
+struct YUVToRGBTableWrapper {
+ base::AlignedMemory<kYUVToRGBTableSize, 16> table;
+};
+
+typedef base::LazyInstance<YUVToRGBTableWrapper>::Leaky
+ YUVToRGBTable;
+static YUVToRGBTable g_table_rec601 = LAZY_INSTANCE_INITIALIZER;
+static YUVToRGBTable g_table_jpeg = LAZY_INSTANCE_INITIALIZER;
+static YUVToRGBTable g_table_rec709 = LAZY_INSTANCE_INITIALIZER;
+static const int16* g_table_rec601_ptr = NULL;
+static const int16* g_table_jpeg_ptr = NULL;
+static const int16* g_table_rec709_ptr = NULL;
+
// Empty SIMD registers state after using them.
void EmptyRegisterStateStub() {}
#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
@@ -127,22 +146,81 @@ int GetVerticalShift(YUVType type) {
return 0;
case YV12:
case YV12J:
+ case YV12HD:
return 1;
}
NOTREACHED();
return 0;
}
-const int16 (&GetLookupTable(YUVType type))[1024][4] {
+const int16* GetLookupTable(YUVType type) {
switch (type) {
case YV12:
case YV16:
- return kCoefficientsRgbY;
+ return g_table_rec601_ptr;
case YV12J:
- return kCoefficientsRgbY_JPEG;
+ return g_table_jpeg_ptr;
+ case YV12HD:
+ return g_table_rec709_ptr;
}
NOTREACHED();
- return kCoefficientsRgbY;
+ return NULL;
+}
+
+// Populates a pre-allocated lookup table from a YUV->RGB matrix.
+const int16* PopulateYUVToRGBTable(const double matrix[3][3],
+ bool full_range,
+ int16* table) {
+ // We'll have 4 sub-tables that lie contiguous in memory, one for each of Y,
+ // U, V and A.
+ const int kNumTables = 4;
+ // Each table has 256 rows (for all possible 8-bit values).
+ const int kNumRows = 256;
+ // Each row has 4 columns, for contributions to each of R, G, B and A.
+ const int kNumColumns = 4;
+ // Each element is a fixed-point (10.6) 16-bit signed value.
+ const int kElementSize = sizeof(int16);
+
+ // Sanity check that our constants here match the size of the statically
+ // allocated tables.
+ COMPILE_ASSERT(
+ kNumTables * kNumRows * kNumColumns * kElementSize == kYUVToRGBTableSize,
+ "YUV lookup table size doesn't match expectation.");
+
+ // Y needs an offset of -16 for color ranges that ignore the lower 16 values,
+ // U and V get -128 to put them in [-128, 127] from [0, 255].
+ int offsets[3] = {(full_range ? 0 : -16), -128, -128};
+
+ for (int i = 0; i < kNumRows; ++i) {
+ // Y, U, and V contributions to each of R, G, B and A.
+ for (int j = 0; j < 3; ++j) {
+#if defined(OS_ANDROID)
+ // Android is RGBA.
+ table[(j * kNumRows + i) * kNumColumns + 0] =
+ matrix[j][0] * 64 * (i + offsets[j]) + 0.5;
+ table[(j * kNumRows + i) * kNumColumns + 1] =
+ matrix[j][1] * 64 * (i + offsets[j]) + 0.5;
+ table[(j * kNumRows + i) * kNumColumns + 2] =
+ matrix[j][2] * 64 * (i + offsets[j]) + 0.5;
+#else
+ // Other platforms are BGRA.
+ table[(j * kNumRows + i) * kNumColumns + 0] =
+ matrix[j][2] * 64 * (i + offsets[j]) + 0.5;
+ table[(j * kNumRows + i) * kNumColumns + 1] =
+ matrix[j][1] * 64 * (i + offsets[j]) + 0.5;
+ table[(j * kNumRows + i) * kNumColumns + 2] =
+ matrix[j][0] * 64 * (i + offsets[j]) + 0.5;
+#endif
+ // Alpha contributions from Y and V are always 0. U is set such that
+ // all values result in a full '255' alpha value.
+ table[(j * kNumRows + i) * kNumColumns + 3] = (j == 1) ? 256 * 64 - 1 : 0;
+ }
+ // And YUVA alpha is passed through as-is.
+ for (int k = 0; k < kNumTables; ++k)
+ table[((kNumTables - 1) * kNumRows + i) * kNumColumns + k] = i;
+ }
+
+ return table;
}
void InitializeCPUSpecificYUVConversions() {
@@ -166,8 +244,9 @@ void InitializeCPUSpecificYUVConversions() {
g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_C;
g_empty_register_state_proc_ = EmptyRegisterStateStub;
- // Assembly code confuses MemorySanitizer.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(MEMORY_SANITIZER)
+ // Assembly code confuses MemorySanitizer. Also not available in iOS builds.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(MEMORY_SANITIZER) && \
+ !defined(OS_IOS)
g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_MMX;
#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
@@ -203,6 +282,34 @@ void InitializeCPUSpecificYUVConversions() {
// See: crbug.com/100462
}
#endif
+
+ // Initialize YUV conversion lookup tables.
+
+ // SD Rec601 YUV->RGB matrix, see http://www.fourcc.org/fccyvrgb.php
+ const double kRec601ConvertMatrix[3][3] = {
+ {1.164, 1.164, 1.164}, {0.0, -0.391, 2.018}, {1.596, -0.813, 0.0},
+ };
+
+ // JPEG table, values from above link.
+ const double kJPEGConvertMatrix[3][3] = {
+ {1.0, 1.0, 1.0}, {0.0, -0.34414, 1.772}, {1.402, -0.71414, 0.0},
+ };
+
+ // Rec709 "HD" color space, values from:
+ // http://www.equasys.de/colorconversion.html
+ const double kRec709ConvertMatrix[3][3] = {
+ {1.164, 1.164, 1.164}, {0.0, -0.213, 2.112}, {1.793, -0.533, 0.0},
+ };
+
+ PopulateYUVToRGBTable(kRec601ConvertMatrix, false,
+ g_table_rec601.Get().table.data_as<int16>());
+ PopulateYUVToRGBTable(kJPEGConvertMatrix, true,
+ g_table_jpeg.Get().table.data_as<int16>());
+ PopulateYUVToRGBTable(kRec709ConvertMatrix, false,
+ g_table_rec709.Get().table.data_as<int16>());
+ g_table_rec601_ptr = g_table_rec601.Get().table.data_as<int16>();
+ g_table_rec709_ptr = g_table_rec709.Get().table.data_as<int16>();
+ g_table_jpeg_ptr = g_table_jpeg.Get().table.data_as<int16>();
}
// Empty SIMD registers state after using them.
@@ -234,6 +341,8 @@ void ScaleYUVToRGB32(const uint8* y_buf,
width == 0 || height == 0)
return;
+ const int16* lookup_table = GetLookupTable(yuv_type);
+
// 4096 allows 3 buffers to fit in 12k.
// Helps performance on CPU with 16K L1 cache.
// Large enough for 3830x2160 and 30" displays which are 2560x1600.
@@ -377,25 +486,16 @@ void ScaleYUVToRGB32(const uint8* y_buf,
v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
}
if (source_dx == kFractionMax) { // Not scaled
- g_convert_yuv_to_rgb32_row_proc_(
- y_ptr, u_ptr, v_ptr, dest_pixel, width, kCoefficientsRgbY);
+ g_convert_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width,
+ lookup_table);
} else {
if (filter & FILTER_BILINEAR_H) {
- g_linear_scale_yuv_to_rgb32_row_proc_(y_ptr,
- u_ptr,
- v_ptr,
- dest_pixel,
- width,
- source_dx,
- kCoefficientsRgbY);
+ g_linear_scale_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel,
+ width, source_dx,
+ lookup_table);
} else {
- g_scale_yuv_to_rgb32_row_proc_(y_ptr,
- u_ptr,
- v_ptr,
- dest_pixel,
- width,
- source_dx,
- kCoefficientsRgbY);
+ g_scale_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width,
+ source_dx, lookup_table);
}
}
}
@@ -429,6 +529,8 @@ void ScaleYUVToRGB32WithRect(const uint8* y_buf,
DCHECK(dest_rect_right > dest_rect_left);
DCHECK(dest_rect_bottom > dest_rect_top);
+ const int16* lookup_table = GetLookupTable(YV12);
+
// Fixed-point value of vertical and horizontal scale down factor.
// Values are in the format 16.16.
int y_step = kFractionMax * source_height / dest_height;
@@ -533,24 +635,14 @@ void ScaleYUVToRGB32WithRect(const uint8* y_buf,
// Perform horizontal interpolation and color space conversion.
// TODO(hclam): Use the MMX version after more testing.
- LinearScaleYUVToRGB32RowWithRange_C(y_temp,
- u_temp,
- v_temp,
- rgb_buf,
- dest_rect_width,
- source_left,
- x_step,
- kCoefficientsRgbY);
+ LinearScaleYUVToRGB32RowWithRange_C(y_temp, u_temp, v_temp, rgb_buf,
+ dest_rect_width, source_left, x_step,
+ lookup_table);
} else {
// If the frame is too large then we linear scale a single row.
- LinearScaleYUVToRGB32RowWithRange_C(y0_ptr,
- u0_ptr,
- v0_ptr,
- rgb_buf,
- dest_rect_width,
- source_left,
- x_step,
- kCoefficientsRgbY);
+ LinearScaleYUVToRGB32RowWithRange_C(y0_ptr, u0_ptr, v0_ptr, rgb_buf,
+ dest_rect_width, source_left, x_step,
+ lookup_table);
}
// Advance vertically in the source and destination image.
diff --git a/chromium/media/base/yuv_convert.h b/chromium/media/base/yuv_convert.h
index cf13edb17ab..e7c2860d304 100644
--- a/chromium/media/base/yuv_convert.h
+++ b/chromium/media/base/yuv_convert.h
@@ -7,7 +7,6 @@
#include "base/basictypes.h"
#include "media/base/media_export.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
// Visual Studio 2010 does not support MMX intrinsics on x64.
// Some win64 yuv_convert code paths use SSE+MMX yasm, so without rewriting
@@ -24,16 +23,17 @@ namespace media {
// Type of YUV surface.
enum YUVType {
- YV16 = 0, // YV16 is half width and full height chroma channels.
- YV12 = 1, // YV12 is half width and half height chroma channels.
- YV12J = 2, // YV12J is the same as YV12, but in JPEG color range.
+ YV16 = 0, // YV16 is half width and full height chroma channels.
+ YV12 = 1, // YV12 is half width and half height chroma channels.
+ YV12J = 2, // YV12J is the same as YV12, but in JPEG color range.
+ YV12HD = 3, // YV12HD is the same as YV12, but in 'HD' Rec709 color space.
};
// Get the appropriate value to bitshift by for vertical indices.
MEDIA_EXPORT int GetVerticalShift(YUVType type);
// Get the appropriate lookup table for a given YUV format.
-MEDIA_EXPORT const int16 (&GetLookupTable(YUVType type))[1024][4];
+MEDIA_EXPORT const int16* GetLookupTable(YUVType type);
// Mirror means flip the image horizontally, as in looking in a mirror.
// Rotate happens after mirroring.
diff --git a/chromium/media/base/yuv_convert_perftest.cc b/chromium/media/base/yuv_convert_perftest.cc
index c6cacafd646..7d3d64c956c 100644
--- a/chromium/media/base/yuv_convert_perftest.cc
+++ b/chromium/media/base/yuv_convert_perftest.cc
@@ -67,7 +67,7 @@ class YUVConvertPerfTest : public testing::Test {
TEST_F(YUVConvertPerfTest, ConvertYUVToRGB32Row_SSE) {
ASSERT_TRUE(base::CPU().has_sse());
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kPerfTestIterations; ++i) {
for (int row = 0; row < kSourceHeight; ++row) {
int chroma_row = row / 2;
@@ -80,8 +80,7 @@ TEST_F(YUVConvertPerfTest, ConvertYUVToRGB32Row_SSE) {
GetLookupTable(YV12));
}
}
- double total_time_seconds =
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_seconds = (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"yuv_convert_perftest", "", "ConvertYUVToRGB32Row_SSE",
kPerfTestIterations / total_time_seconds, "runs/s", true);
@@ -96,7 +95,7 @@ TEST_F(YUVConvertPerfTest, ScaleYUVToRGB32Row_SSE) {
const int kSourceDx = 80000; // This value means a scale down.
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kPerfTestIterations; ++i) {
for (int row = 0; row < kSourceHeight; ++row) {
int chroma_row = row / 2;
@@ -110,8 +109,7 @@ TEST_F(YUVConvertPerfTest, ScaleYUVToRGB32Row_SSE) {
GetLookupTable(YV12));
}
}
- double total_time_seconds =
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_seconds = (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"yuv_convert_perftest", "", "ScaleYUVToRGB32Row_SSE",
kPerfTestIterations / total_time_seconds, "runs/s", true);
@@ -123,7 +121,7 @@ TEST_F(YUVConvertPerfTest, LinearScaleYUVToRGB32Row_SSE) {
const int kSourceDx = 80000; // This value means a scale down.
- base::TimeTicks start = base::TimeTicks::HighResNow();
+ base::TimeTicks start = base::TimeTicks::Now();
for (int i = 0; i < kPerfTestIterations; ++i) {
for (int row = 0; row < kSourceHeight; ++row) {
int chroma_row = row / 2;
@@ -137,8 +135,7 @@ TEST_F(YUVConvertPerfTest, LinearScaleYUVToRGB32Row_SSE) {
GetLookupTable(YV12));
}
}
- double total_time_seconds =
- (base::TimeTicks::HighResNow() - start).InSecondsF();
+ double total_time_seconds = (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"yuv_convert_perftest", "", "LinearScaleYUVToRGB32Row_SSE",
kPerfTestIterations / total_time_seconds, "runs/s", true);
diff --git a/chromium/media/base/yuv_convert_unittest.cc b/chromium/media/base/yuv_convert_unittest.cc
index 5e9d60830a5..9ab8a9cab31 100644
--- a/chromium/media/base/yuv_convert_unittest.cc
+++ b/chromium/media/base/yuv_convert_unittest.cc
@@ -11,10 +11,9 @@
#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/simd/convert_yuv_to_rgb.h"
#include "media/base/simd/filter_yuv.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
#include "media/base/yuv_convert.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gfx/rect.h"
+#include "ui/gfx/geometry/rect.h"
// Size of raw image.
static const int kSourceWidth = 640;
@@ -175,6 +174,7 @@ class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
switch (GetParam().yuv_type) {
case media::YV12:
case media::YV12J:
+ case media::YV12HD:
ReadYV12Data(&yuv_bytes_);
break;
case media::YV16:
@@ -192,6 +192,7 @@ class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
switch (GetParam().yuv_type) {
case media::YV12:
case media::YV12J:
+ case media::YV12HD:
return yuv_bytes_.get() + kSourceVOffset;
case media::YV16:
return yuv_bytes_.get() + kSourceYSize * 3 / 2;
diff --git a/chromium/media/blink/BUILD.gn b/chromium/media/blink/BUILD.gn
index 078b82424c4..1005bd1aa95 100644
--- a/chromium/media/blink/BUILD.gn
+++ b/chromium/media/blink/BUILD.gn
@@ -1,6 +1,9 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
+import("//testing/test.gni")
+
component("blink") {
output_name = "media_blink"
@@ -8,9 +11,11 @@ component("blink") {
"//base",
"//cc",
"//cc/blink",
+ "//gpu/blink",
"//media",
"//media:shared_memory_support",
"//net",
+ "//skia",
"//third_party/WebKit/public:blink",
"//ui/gfx",
"//ui/gfx/geometry",
@@ -33,18 +38,28 @@ component("blink") {
"cdm_result_promise.h",
"cdm_result_promise_helper.cc",
"cdm_result_promise_helper.h",
+ "cdm_session_adapter.cc",
+ "cdm_session_adapter.h",
"encrypted_media_player_support.cc",
"encrypted_media_player_support.h",
+ "key_system_config_selector.cc",
+ "key_system_config_selector.h",
"new_session_cdm_result_promise.cc",
"new_session_cdm_result_promise.h",
- "null_encrypted_media_player_support.cc",
- "null_encrypted_media_player_support.h",
"texttrack_impl.cc",
"texttrack_impl.h",
"video_frame_compositor.cc",
"video_frame_compositor.h",
"webaudiosourceprovider_impl.cc",
"webaudiosourceprovider_impl.h",
+ "webcontentdecryptionmodule_impl.cc",
+ "webcontentdecryptionmodule_impl.h",
+ "webcontentdecryptionmoduleaccess_impl.cc",
+ "webcontentdecryptionmoduleaccess_impl.h",
+ "webcontentdecryptionmodulesession_impl.cc",
+ "webcontentdecryptionmodulesession_impl.h",
+ "webencryptedmediaclient_impl.cc",
+ "webencryptedmediaclient_impl.h",
"webinbandtexttrack_impl.cc",
"webinbandtexttrack_impl.h",
"webmediaplayer_delegate.h",
@@ -62,43 +77,56 @@ component("blink") {
if (is_android) {
sources -= [
+ "encrypted_media_player_support.cc",
+ "encrypted_media_player_support.h",
"webmediaplayer_impl.cc",
"webmediaplayer_impl.h",
]
}
}
-test("media_blink_unittests") {
- deps = [
- ":blink",
- "//base",
- "//base/test:test_support",
- "//cc",
- "//cc/blink",
- "//media",
- "//media:shared_memory_support",
- "//media/base:test_support",
- "//net",
- "//testing/gmock",
- "//testing/gtest",
- "//third_party/WebKit/public:blink",
- "//ui/gfx/geometry",
- "//ui/gfx:test_support",
- "//url",
- ]
+if (!is_mac) {
+ # TODO(GYP): Make linking this work on the mac.
+ test("media_blink_unittests") {
+ deps = [
+ ":blink",
+ "//base",
+ "//base/test:test_support",
+ "//cc",
+ "//cc/blink",
+ "//gin",
+ "//media",
+ "//media:shared_memory_support",
+ "//media/base:test_support",
+ "//net",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/WebKit/public:blink",
+ "//ui/gfx/geometry",
+ "//ui/gfx:test_support",
+ "//url",
+ ]
- sources = [
- "buffered_data_source_host_impl_unittest.cc",
- "buffered_data_source_unittest.cc",
- "buffered_resource_loader_unittest.cc",
- "cache_util_unittest.cc",
- "mock_webframeclient.h",
- "mock_weburlloader.cc",
- "mock_weburlloader.h",
- "run_all_unittests.cc",
- "test_response_generator.cc",
- "test_response_generator.h",
- "video_frame_compositor_unittest.cc",
- "webaudiosourceprovider_impl_unittest.cc",
- ]
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ sources = [
+ "buffered_data_source_host_impl_unittest.cc",
+ "buffered_data_source_unittest.cc",
+ "buffered_resource_loader_unittest.cc",
+ "cache_util_unittest.cc",
+ "key_system_config_selector_unittest.cc",
+ "mock_webframeclient.h",
+ "mock_weburlloader.cc",
+ "mock_weburlloader.h",
+ "run_all_unittests.cc",
+ "test_response_generator.cc",
+ "test_response_generator.h",
+ "video_frame_compositor_unittest.cc",
+ "webaudiosourceprovider_impl_unittest.cc",
+ ]
+
+ if (is_android) {
+ deps += [ "//ui/gl" ]
+ }
+ }
}
diff --git a/chromium/media/blink/DEPS b/chromium/media/blink/DEPS
index 5c058166257..9ab9dd462ab 100644
--- a/chromium/media/blink/DEPS
+++ b/chromium/media/blink/DEPS
@@ -1,10 +1,12 @@
include_rules = [
+ "+cc/blink/web_layer_impl.h",
"+cc/layers/video_frame_provider.h",
"+cc/layers/video_layer.h",
- "+cc/blink/web_layer_impl.h",
+ "+gin",
+ "+gpu/blink",
"+media",
"+net/base",
"+net/http",
"+third_party/WebKit/public/platform",
"+third_party/WebKit/public/web",
-] \ No newline at end of file
+]
diff --git a/chromium/media/blink/buffered_data_source.cc b/chromium/media/blink/buffered_data_source.cc
index 16128f1dd30..d5f32b1ca29 100644
--- a/chromium/media/blink/buffered_data_source.cc
+++ b/chromium/media/blink/buffered_data_source.cc
@@ -20,9 +20,12 @@ namespace {
// of FFmpeg.
const int kInitialReadBufferSize = 32768;
-// Number of cache misses we allow for a single Read() before signaling an
-// error.
-const int kNumCacheMissRetries = 3;
+// Number of cache misses or read failures we allow for a single Read() before
+// signaling an error.
+const int kLoaderRetries = 3;
+
+// The number of milliseconds to wait before retrying a failed load.
+const int kLoaderFailedRetryDelayMs = 250;
} // namespace
@@ -102,11 +105,15 @@ BufferedDataSource::BufferedDataSource(
host_(host),
downloading_cb_(downloading_cb),
weak_factory_(this) {
+ weak_ptr_ = weak_factory_.GetWeakPtr();
DCHECK(host_);
DCHECK(!downloading_cb_.is_null());
+ DCHECK(render_task_runner_->BelongsToCurrentThread());
}
-BufferedDataSource::~BufferedDataSource() {}
+BufferedDataSource::~BufferedDataSource() {
+ DCHECK(render_task_runner_->BelongsToCurrentThread());
+}
// A factory method to create BufferedResourceLoader using the read parameters.
// This method can be overridden to inject mock BufferedResourceLoader object
@@ -182,11 +189,11 @@ void BufferedDataSource::Abort() {
frame_ = NULL;
}
-void BufferedDataSource::MediaPlaybackRateChanged(float playback_rate) {
+void BufferedDataSource::MediaPlaybackRateChanged(double playback_rate) {
DCHECK(render_task_runner_->BelongsToCurrentThread());
DCHECK(loader_.get());
- if (playback_rate < 0.0f)
+ if (playback_rate < 0.0)
return;
playback_rate_ = playback_rate;
@@ -224,6 +231,12 @@ void BufferedDataSource::SetBitrate(int bitrate) {
bitrate));
}
+void BufferedDataSource::OnBufferingHaveEnough() {
+ DCHECK(render_task_runner_->BelongsToCurrentThread());
+ if (loader_ && preload_ == METADATA && !media_has_played_ && !IsStreaming())
+ loader_->CancelUponDeferral();
+}
+
void BufferedDataSource::Read(
int64 position, int size, uint8* data,
const DataSource::ReadCB& read_cb) {
@@ -424,8 +437,19 @@ void BufferedDataSource::ReadCallback(
// Stop the resource load if it failed.
loader_->Stop();
- if (status == BufferedResourceLoader::kCacheMiss &&
- read_op_->retries() < kNumCacheMissRetries) {
+ if (read_op_->retries() < kLoaderRetries) {
+ // Allow some resiliency against sporadic network failures or intentional
+ // cancellations due to a system suspend / resume. Here we treat failed
+ // reads as a cache miss so long as we haven't exceeded max retries.
+ if (status == BufferedResourceLoader::kFailed) {
+ render_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&BufferedDataSource::ReadCallback,
+ weak_factory_.GetWeakPtr(),
+ BufferedResourceLoader::kCacheMiss, 0),
+ base::TimeDelta::FromMilliseconds(kLoaderFailedRetryDelayMs));
+ return;
+ }
+
read_op_->IncrementRetries();
// Recreate a loader starting from where we last left off until the
diff --git a/chromium/media/blink/buffered_data_source.h b/chromium/media/blink/buffered_data_source.h
index fc462b84ef6..32f14816fcd 100644
--- a/chromium/media/blink/buffered_data_source.h
+++ b/chromium/media/blink/buffered_data_source.h
@@ -42,8 +42,8 @@ class MEDIA_EXPORT BufferedDataSourceHost {
// A data source capable of loading URLs and buffering the data using an
// in-memory sliding window.
//
-// BufferedDataSource must be created and initialized on the render thread
-// before being passed to other threads. It may be deleted on any thread.
+// BufferedDataSource must be created and destroyed on the thread associated
+// with the |task_runner| passed in the constructor.
class MEDIA_EXPORT BufferedDataSource : public DataSource {
public:
// Used to specify video preload states. They are "hints" to the browser about
@@ -100,13 +100,19 @@ class MEDIA_EXPORT BufferedDataSource : public DataSource {
// Notifies changes in playback state for controlling media buffering
// behavior.
- void MediaPlaybackRateChanged(float playback_rate);
+ void MediaPlaybackRateChanged(double playback_rate);
void MediaIsPlaying();
void MediaIsPaused();
+ bool media_has_played() const { return media_has_played_; }
// Returns true if the resource is local.
bool assume_fully_buffered() { return !url_.SchemeIsHTTPOrHTTPS(); }
+ // Cancels any open network connections once reaching the deferred state for
+ // preload=metadata, non-streaming resources that have not started playback.
+ // If already deferred, connections will be immediately closed.
+ void OnBufferingHaveEnough();
+
// DataSource implementation.
// Called from demuxer thread.
void Stop() override;
@@ -223,7 +229,7 @@ class MEDIA_EXPORT BufferedDataSource : public DataSource {
int bitrate_;
// Current playback rate.
- float playback_rate_;
+ double playback_rate_;
scoped_refptr<MediaLog> media_log_;
@@ -232,7 +238,10 @@ class MEDIA_EXPORT BufferedDataSource : public DataSource {
DownloadingCB downloading_cb_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
+ // Disallow rebinding WeakReference ownership to a different thread by keeping
+ // a persistent reference. This avoids problems with the thread-safety of
+ // reaching into this class from multiple threads to attain a WeakPtr.
+ base::WeakPtr<BufferedDataSource> weak_ptr_;
base::WeakPtrFactory<BufferedDataSource> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(BufferedDataSource);
diff --git a/chromium/media/blink/buffered_data_source_unittest.cc b/chromium/media/blink/buffered_data_source_unittest.cc
index a76beb00080..3776fd63f68 100644
--- a/chromium/media/blink/buffered_data_source_unittest.cc
+++ b/chromium/media/blink/buffered_data_source_unittest.cc
@@ -4,6 +4,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "media/base/media_log.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
@@ -17,7 +18,9 @@
using ::testing::_;
using ::testing::Assign;
+using ::testing::DoAll;
using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
using ::testing::InSequence;
using ::testing::NiceMock;
using ::testing::StrictMock;
@@ -220,6 +223,7 @@ class BufferedDataSourceTest : public testing::Test {
BufferedResourceLoader* loader() {
return data_source_->loader_.get();
}
+ ActiveLoader* active_loader() { return loader()->active_loader_.get(); }
WebURLLoader* url_loader() {
return loader()->active_loader_->loader_.get();
}
@@ -230,9 +234,9 @@ class BufferedDataSourceTest : public testing::Test {
return loader()->defer_strategy_;
}
int data_source_bitrate() { return data_source_->bitrate_; }
- int data_source_playback_rate() { return data_source_->playback_rate_; }
+ double data_source_playback_rate() { return data_source_->playback_rate_; }
int loader_bitrate() { return loader()->bitrate_; }
- int loader_playback_rate() { return loader()->playback_rate_; }
+ double loader_playback_rate() { return loader()->playback_rate_; }
bool is_local_source() { return data_source_->assume_fully_buffered(); }
void set_might_be_reused_from_cache_in_future(bool value) {
loader()->might_be_reused_from_cache_in_future_ = value;
@@ -410,6 +414,34 @@ TEST_F(BufferedDataSourceTest, Http_Retry) {
Stop();
}
+TEST_F(BufferedDataSourceTest, Http_RetryOnError) {
+ InitializeWith206Response();
+
+ // Read to advance our position.
+ EXPECT_CALL(*this, ReadCallback(kDataSize));
+ EXPECT_CALL(host_, AddBufferedByteRange(0, kDataSize - 1));
+ ReadAt(0);
+ ReceiveData(kDataSize);
+
+ // Issue a pending read but trigger an error to force a retry.
+ EXPECT_CALL(*this, ReadCallback(kDataSize));
+ EXPECT_CALL(host_, AddBufferedByteRange(kDataSize, (kDataSize * 2) - 1));
+ ReadAt(kDataSize);
+ base::RunLoop run_loop;
+ EXPECT_CALL(*data_source_, CreateResourceLoader(_, _))
+ .WillOnce(
+ DoAll(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit),
+ Invoke(data_source_.get(),
+ &MockBufferedDataSource::CreateMockResourceLoader)));
+ loader()->didFail(url_loader(), response_generator_->GenerateError());
+ run_loop.Run();
+ Respond(response_generator_->Generate206(kDataSize));
+ ReceiveData(kDataSize);
+ FinishLoading();
+ EXPECT_FALSE(data_source_->loading());
+ Stop();
+}
+
TEST_F(BufferedDataSourceTest, File_Retry) {
InitializeWithFileResponse();
@@ -528,9 +560,9 @@ TEST_F(BufferedDataSourceTest, DefaultValues) {
EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
EXPECT_EQ(0, data_source_bitrate());
- EXPECT_EQ(0.0f, data_source_playback_rate());
+ EXPECT_EQ(0.0, data_source_playback_rate());
EXPECT_EQ(0, loader_bitrate());
- EXPECT_EQ(0.0f, loader_playback_rate());
+ EXPECT_EQ(0.0, loader_playback_rate());
EXPECT_TRUE(data_source_->loading());
Stop();
@@ -562,10 +594,10 @@ TEST_F(BufferedDataSourceTest, SetBitrate) {
TEST_F(BufferedDataSourceTest, MediaPlaybackRateChanged) {
InitializeWith206Response();
- data_source_->MediaPlaybackRateChanged(2.0f);
+ data_source_->MediaPlaybackRateChanged(2.0);
message_loop_.RunUntilIdle();
- EXPECT_EQ(2.0f, data_source_playback_rate());
- EXPECT_EQ(2.0f, loader_playback_rate());
+ EXPECT_EQ(2.0, data_source_playback_rate());
+ EXPECT_EQ(2.0, loader_playback_rate());
// Read so far ahead to cause the loader to get recreated.
BufferedResourceLoader* old_loader = loader();
@@ -776,4 +808,45 @@ TEST_F(BufferedDataSourceTest,
Stop();
}
+TEST_F(BufferedDataSourceTest, ExternalResource_Response206_VerifyDefer) {
+ set_preload(BufferedDataSource::METADATA);
+ InitializeWith206Response();
+
+ EXPECT_EQ(BufferedDataSource::METADATA, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_TRUE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kReadThenDefer, defer_strategy());
+
+ // Read a bit from the beginning.
+ ReadAt(0);
+ EXPECT_CALL(*this, ReadCallback(kDataSize));
+ EXPECT_CALL(host_, AddBufferedByteRange(0, kDataSize - 1));
+ ReceiveData(kDataSize);
+
+ ASSERT_TRUE(active_loader());
+ EXPECT_TRUE(active_loader()->deferred());
+}
+
+TEST_F(BufferedDataSourceTest, ExternalResource_Response206_CancelAfterDefer) {
+ set_preload(BufferedDataSource::METADATA);
+ InitializeWith206Response();
+
+ EXPECT_EQ(BufferedDataSource::METADATA, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_TRUE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kReadThenDefer, defer_strategy());
+
+ data_source_->OnBufferingHaveEnough();
+
+ ASSERT_TRUE(active_loader());
+
+ // Read a bit from the beginning.
+ ReadAt(0);
+ EXPECT_CALL(*this, ReadCallback(kDataSize));
+ EXPECT_CALL(host_, AddBufferedByteRange(0, kDataSize - 1));
+ ReceiveData(kDataSize);
+
+ EXPECT_FALSE(active_loader());
+}
+
} // namespace media
diff --git a/chromium/media/blink/buffered_resource_loader.cc b/chromium/media/blink/buffered_resource_loader.cc
index d88266d06b2..51ec8fe99e3 100644
--- a/chromium/media/blink/buffered_resource_loader.cc
+++ b/chromium/media/blink/buffered_resource_loader.cc
@@ -54,12 +54,12 @@ static const int kForwardWaitThreshold = 2 * kMegabyte;
// Computes the suggested backward and forward capacity for the buffer
// if one wants to play at |playback_rate| * the natural playback speed.
// Use a value of 0 for |bitrate| if it is unknown.
-static void ComputeTargetBufferWindow(float playback_rate, int bitrate,
+static void ComputeTargetBufferWindow(double playback_rate, int bitrate,
int* out_backward_capacity,
int* out_forward_capacity) {
static const int kDefaultBitrate = 200 * 1024 * 8; // 200 Kbps.
static const int kMaxBitrate = 20 * kMegabyte * 8; // 20 Mbps.
- static const float kMaxPlaybackRate = 25.0;
+ static const double kMaxPlaybackRate = 25.0;
static const int kTargetSecondsBufferedAhead = 10;
static const int kTargetSecondsBufferedBehind = 2;
@@ -71,12 +71,12 @@ static void ComputeTargetBufferWindow(float playback_rate, int bitrate,
// Only scale the buffer window for playback rates greater than 1.0 in
// magnitude and clamp to prevent overflow.
bool backward_playback = false;
- if (playback_rate < 0.0f) {
+ if (playback_rate < 0.0) {
backward_playback = true;
- playback_rate *= -1.0f;
+ playback_rate *= -1.0;
}
- playback_rate = std::max(playback_rate, 1.0f);
+ playback_rate = std::max(playback_rate, 1.0);
playback_rate = std::min(playback_rate, kMaxPlaybackRate);
int bytes_per_second = (bitrate / 8.0) * playback_rate;
@@ -101,7 +101,7 @@ BufferedResourceLoader::BufferedResourceLoader(
int64 last_byte_position,
DeferStrategy strategy,
int bitrate,
- float playback_rate,
+ double playback_rate,
MediaLog* media_log)
: buffer_(kMinBufferCapacity, kMinBufferCapacity),
loader_failed_(false),
@@ -124,7 +124,8 @@ BufferedResourceLoader::BufferedResourceLoader(
last_offset_(0),
bitrate_(bitrate),
playback_rate_(playback_rate),
- media_log_(media_log) {
+ media_log_(media_log),
+ cancel_upon_deferral_(false) {
// Set the initial capacity of |buffer_| based on |bitrate_| and
// |playback_rate_|.
@@ -570,7 +571,7 @@ void BufferedResourceLoader::UpdateDeferStrategy(DeferStrategy strategy) {
UpdateDeferBehavior();
}
-void BufferedResourceLoader::SetPlaybackRate(float playback_rate) {
+void BufferedResourceLoader::SetPlaybackRate(double playback_rate) {
playback_rate_ = playback_rate;
// This is a pause so don't bother updating the buffer window as we'll likely
@@ -578,6 +579,10 @@ void BufferedResourceLoader::SetPlaybackRate(float playback_rate) {
if (playback_rate_ == 0.0)
return;
+ // Abort any cancellations in progress if playback starts.
+ if (playback_rate_ > 0 && cancel_upon_deferral_)
+ cancel_upon_deferral_ = false;
+
UpdateBufferWindow();
}
@@ -616,6 +621,9 @@ void BufferedResourceLoader::SetDeferred(bool deferred) {
active_loader_->SetDeferred(deferred);
loading_cb_.Run(deferred ? kLoadingDeferred : kLoading);
+
+ if (deferred && cancel_upon_deferral_)
+ CancelUponDeferral();
}
bool BufferedResourceLoader::ShouldDefer() const {
@@ -732,6 +740,12 @@ bool BufferedResourceLoader::ParseContentRange(
return true;
}
+void BufferedResourceLoader::CancelUponDeferral() {
+ cancel_upon_deferral_ = true;
+ if (active_loader_ && active_loader_->deferred())
+ active_loader_.reset();
+}
+
bool BufferedResourceLoader::VerifyPartialResponse(
const WebURLResponse& response) {
int64 first_byte_position, last_byte_position, instance_size;
diff --git a/chromium/media/blink/buffered_resource_loader.h b/chromium/media/blink/buffered_resource_loader.h
index 4f380eae157..cb0272063ec 100644
--- a/chromium/media/blink/buffered_resource_loader.h
+++ b/chromium/media/blink/buffered_resource_loader.h
@@ -85,7 +85,7 @@ class MEDIA_EXPORT BufferedResourceLoader
int64 last_byte_position,
DeferStrategy strategy,
int bitrate,
- float playback_rate,
+ double playback_rate,
MediaLog* media_log);
virtual ~BufferedResourceLoader();
@@ -179,7 +179,7 @@ class MEDIA_EXPORT BufferedResourceLoader
// Sets the playback rate to the given value and updates buffer window
// accordingly.
- void SetPlaybackRate(float playback_rate);
+ void SetPlaybackRate(double playback_rate);
// Sets the bitrate to the given value and updates buffer window
// accordingly.
@@ -198,6 +198,13 @@ class MEDIA_EXPORT BufferedResourceLoader
const std::string& content_range_str, int64* first_byte_position,
int64* last_byte_position, int64* instance_size);
+ // Cancels and closes any outstanding deferred ActiveLoader instances. Does
+ // not report a failed state, so subsequent read calls to cache may still
+ // complete okay. If the ActiveLoader is not deferred it will be canceled once
+ // it is unless playback starts before then (as determined by the reported
+ // playback rate).
+ void CancelUponDeferral();
+
private:
friend class BufferedDataSourceTest;
friend class BufferedResourceLoaderTest;
@@ -307,10 +314,12 @@ class MEDIA_EXPORT BufferedResourceLoader
int bitrate_;
// Playback rate of the media.
- float playback_rate_;
+ double playback_rate_;
scoped_refptr<MediaLog> media_log_;
+ bool cancel_upon_deferral_;
+
DISALLOW_COPY_AND_ASSIGN(BufferedResourceLoader);
};
diff --git a/chromium/media/blink/buffered_resource_loader_unittest.cc b/chromium/media/blink/buffered_resource_loader_unittest.cc
index 3e7eaa8a885..1fdd6d8844d 100644
--- a/chromium/media/blink/buffered_resource_loader_unittest.cc
+++ b/chromium/media/blink/buffered_resource_loader_unittest.cc
@@ -282,6 +282,8 @@ class BufferedResourceLoaderTest : public testing::Test {
EXPECT_LE(loader_->buffer_.backward_capacity(), kMaxBufferCapacity);
}
+ bool HasActiveLoader() { return loader_->active_loader_; }
+
MOCK_METHOD1(StartCallback, void(BufferedResourceLoader::Status));
MOCK_METHOD2(ReadCallback, void(BufferedResourceLoader::Status, int));
MOCK_METHOD1(LoadingCallback, void(BufferedResourceLoader::LoadingState));
@@ -1066,7 +1068,7 @@ TEST_F(BufferedResourceLoaderTest, BufferWindow_PlaybackRate_Zero) {
TEST_F(BufferedResourceLoaderTest, BufferWindow_PlaybackRate_BelowLowerBound) {
Initialize(kHttpUrl, -1, -1);
Start();
- loader_->SetPlaybackRate(0.1f);
+ loader_->SetPlaybackRate(0.1);
CheckBufferWindowBounds();
StopWhenLoad();
}
@@ -1128,4 +1130,30 @@ TEST(BufferedResourceLoaderStandaloneTest, ParseContentRange) {
kPositionNotSpecified);
}
+// Tests the data buffering logic of ReadThenDefer strategy.
+TEST_F(BufferedResourceLoaderTest, CancelAfterDeferral) {
+ Initialize(kHttpUrl, 10, 99);
+ SetLoaderBuffer(10, 20);
+ loader_->UpdateDeferStrategy(BufferedResourceLoader::kReadThenDefer);
+ loader_->CancelUponDeferral();
+ Start();
+ PartialResponse(10, 99, 100);
+
+ uint8 buffer[10];
+
+ // Make an outstanding read request.
+ ReadLoader(10, 10, buffer);
+
+ // Receive almost enough data to cover, shouldn't defer.
+ WriteLoader(10, 9);
+ EXPECT_TRUE(HasActiveLoader());
+
+ // As soon as we have received enough data to fulfill the read, defer.
+ EXPECT_CALL(*this, LoadingCallback(BufferedResourceLoader::kLoadingDeferred));
+ EXPECT_CALL(*this, ReadCallback(BufferedResourceLoader::kOk, 10));
+ WriteLoader(19, 1);
+ VerifyBuffer(buffer, 10, 10);
+ EXPECT_FALSE(HasActiveLoader());
+}
+
} // namespace media
diff --git a/chromium/media/blink/cdm_result_promise.h b/chromium/media/blink/cdm_result_promise.h
index 6ae5778be2f..57313df3ebe 100644
--- a/chromium/media/blink/cdm_result_promise.h
+++ b/chromium/media/blink/cdm_result_promise.h
@@ -10,6 +10,7 @@
#include "media/base/media_keys.h"
#include "media/blink/cdm_result_promise_helper.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
+#include "third_party/WebKit/public/platform/WebString.h"
namespace media {
@@ -25,13 +26,13 @@ class CdmResultPromise : public media::CdmPromiseTemplate<T...> {
public:
CdmResultPromise(const blink::WebContentDecryptionModuleResult& result,
const std::string& uma_name);
- virtual ~CdmResultPromise();
+ ~CdmResultPromise() override;
// CdmPromiseTemplate<T> implementation.
- virtual void resolve(const T&... result) override;
- virtual void reject(media::MediaKeys::Exception exception_code,
- uint32 system_code,
- const std::string& error_message) override;
+ void resolve(const T&... result) override;
+ void reject(media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) override;
private:
using media::CdmPromiseTemplate<T...>::MarkPromiseSettled;
@@ -64,14 +65,6 @@ inline void CdmResultPromise<>::resolve() {
web_cdm_result_.complete();
}
-template <>
-inline void CdmResultPromise<media::KeyIdsVector>::resolve(
- const media::KeyIdsVector& result) {
- // TODO(jrummell): Update blink::WebContentDecryptionModuleResult to
- // handle the set of keys.
- reject(media::MediaKeys::NOT_SUPPORTED_ERROR, 0, "Not implemented.");
-}
-
template <typename... T>
void CdmResultPromise<T...>::reject(media::MediaKeys::Exception exception_code,
uint32 system_code,
diff --git a/chromium/media/blink/cdm_session_adapter.cc b/chromium/media/blink/cdm_session_adapter.cc
new file mode 100644
index 00000000000..46147694d0d
--- /dev/null
+++ b/chromium/media/blink/cdm_session_adapter.cc
@@ -0,0 +1,198 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/blink/cdm_session_adapter.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "media/base/cdm_factory.h"
+#include "media/base/cdm_key_information.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/key_systems.h"
+#include "media/blink/webcontentdecryptionmodule_impl.h"
+#include "media/blink/webcontentdecryptionmodulesession_impl.h"
+#include "url/gurl.h"
+
+namespace media {
+
+const char kMediaEME[] = "Media.EME.";
+const char kDot[] = ".";
+
+CdmSessionAdapter::CdmSessionAdapter() : weak_ptr_factory_(this) {
+}
+
+CdmSessionAdapter::~CdmSessionAdapter() {}
+
+void CdmSessionAdapter::CreateCdm(
+ CdmFactory* cdm_factory,
+ const std::string& key_system,
+ const GURL& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result) {
+ // Note: WebContentDecryptionModuleImpl::Create() calls this method without
+ // holding a reference to the CdmSessionAdapter. Bind OnCdmCreated() with
+ // |this| instead of |weak_this| to prevent |this| from being destructed.
+ base::WeakPtr<CdmSessionAdapter> weak_this = weak_ptr_factory_.GetWeakPtr();
+ cdm_factory->Create(
+ key_system, security_origin, cdm_config,
+ base::Bind(&CdmSessionAdapter::OnSessionMessage, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionClosed, weak_this),
+ base::Bind(&CdmSessionAdapter::OnLegacySessionError, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionKeysChange, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionExpirationUpdate, weak_this),
+ base::Bind(&CdmSessionAdapter::OnCdmCreated, this, key_system, result));
+}
+
+void CdmSessionAdapter::SetServerCertificate(
+ const std::vector<uint8_t>& certificate,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ cdm_->SetServerCertificate(certificate, promise.Pass());
+}
+
+WebContentDecryptionModuleSessionImpl* CdmSessionAdapter::CreateSession() {
+ return new WebContentDecryptionModuleSessionImpl(this);
+}
+
+bool CdmSessionAdapter::RegisterSession(
+ const std::string& session_id,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> session) {
+ // If this session ID is already registered, don't register it again.
+ if (ContainsKey(sessions_, session_id))
+ return false;
+
+ sessions_[session_id] = session;
+ return true;
+}
+
+void CdmSessionAdapter::UnregisterSession(const std::string& session_id) {
+ DCHECK(ContainsKey(sessions_, session_id));
+ sessions_.erase(session_id);
+}
+
+void CdmSessionAdapter::InitializeNewSession(
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ MediaKeys::SessionType session_type,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ cdm_->CreateSessionAndGenerateRequest(session_type, init_data_type, init_data,
+ promise.Pass());
+}
+
+void CdmSessionAdapter::LoadSession(MediaKeys::SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ cdm_->LoadSession(session_type, session_id, promise.Pass());
+}
+
+void CdmSessionAdapter::UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ cdm_->UpdateSession(session_id, response, promise.Pass());
+}
+
+void CdmSessionAdapter::CloseSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ cdm_->CloseSession(session_id, promise.Pass());
+}
+
+void CdmSessionAdapter::RemoveSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ cdm_->RemoveSession(session_id, promise.Pass());
+}
+
+CdmContext* CdmSessionAdapter::GetCdmContext() {
+ return cdm_->GetCdmContext();
+}
+
+const std::string& CdmSessionAdapter::GetKeySystem() const {
+ return key_system_;
+}
+
+const std::string& CdmSessionAdapter::GetKeySystemUMAPrefix() const {
+ return key_system_uma_prefix_;
+}
+
+void CdmSessionAdapter::OnCdmCreated(
+ const std::string& key_system,
+ blink::WebContentDecryptionModuleResult result,
+ scoped_ptr<MediaKeys> cdm,
+ const std::string& error_message) {
+ DVLOG(2) << __FUNCTION__;
+ if (!cdm) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ blink::WebString::fromUTF8(error_message));
+ return;
+ }
+
+ key_system_ = key_system;
+ key_system_uma_prefix_ =
+ kMediaEME + GetKeySystemNameForUMA(key_system) + kDot;
+ cdm_ = cdm.Pass();
+
+ result.completeWithContentDecryptionModule(
+ new WebContentDecryptionModuleImpl(this));
+}
+
+void CdmSessionAdapter::OnSessionMessage(
+ const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8_t>& message,
+ const GURL& /* legacy_destination_url */) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << session_id;
+ if (session)
+ session->OnSessionMessage(message_type, message);
+}
+
+void CdmSessionAdapter::OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ // TODO(jrummell): Pass |keys_info| on.
+ WebContentDecryptionModuleSessionImpl* session = GetSession(session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << session_id;
+ if (session)
+ session->OnSessionKeysChange(has_additional_usable_key, keys_info.Pass());
+}
+
+void CdmSessionAdapter::OnSessionExpirationUpdate(
+ const std::string& session_id,
+ const base::Time& new_expiry_time) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << session_id;
+ if (session)
+ session->OnSessionExpirationUpdate(new_expiry_time);
+}
+
+void CdmSessionAdapter::OnSessionClosed(const std::string& session_id) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << session_id;
+ if (session)
+ session->OnSessionClosed();
+}
+
+void CdmSessionAdapter::OnLegacySessionError(
+ const std::string& session_id,
+ MediaKeys::Exception exception_code,
+ uint32_t system_code,
+ const std::string& error_message) {
+ // Error events not used by unprefixed EME.
+ // TODO(jrummell): Remove when prefixed EME removed.
+}
+
+WebContentDecryptionModuleSessionImpl* CdmSessionAdapter::GetSession(
+ const std::string& session_id) {
+ // Since session objects may get garbage collected, it is possible that there
+ // are events coming back from the CDM and the session has been unregistered.
+ // We can not tell if the CDM is firing events at sessions that never existed.
+ SessionMap::iterator session = sessions_.find(session_id);
+ return (session != sessions_.end()) ? session->second.get() : NULL;
+}
+
+} // namespace media
diff --git a/chromium/media/blink/cdm_session_adapter.h b/chromium/media/blink/cdm_session_adapter.h
new file mode 100644
index 00000000000..d70e801ab9c
--- /dev/null
+++ b/chromium/media/blink/cdm_session_adapter.h
@@ -0,0 +1,152 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_CDM_SESSION_ADAPTER_H_
+#define MEDIA_BLINK_CDM_SESSION_ADAPTER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_keys.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleSession.h"
+
+class GURL;
+
+namespace media {
+
+struct CdmConfig;
+class CdmFactory;
+class WebContentDecryptionModuleSessionImpl;
+
+// Owns the CDM instance and makes calls from session objects to the CDM.
+// Forwards the session ID-based callbacks of the MediaKeys interface to the
+// appropriate session object. Callers should hold references to this class
+// as long as they need the CDM instance.
+class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
+ public:
+ CdmSessionAdapter();
+
+ // Creates the CDM for |key_system| using |cdm_factory| and returns the result
+ // via |result|.
+ void CreateCdm(CdmFactory* cdm_factory,
+ const std::string& key_system,
+ const GURL& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result);
+
+ // Provides a server certificate to be used to encrypt messages to the
+ // license server.
+ void SetServerCertificate(const std::vector<uint8_t>& certificate,
+ scoped_ptr<SimpleCdmPromise> promise);
+
+ // Creates a new session and adds it to the internal map. The caller owns the
+ // created session. RemoveSession() must be called when destroying it, if
+ // RegisterSession() was called.
+ WebContentDecryptionModuleSessionImpl* CreateSession();
+
+ // Adds a session to the internal map. Called once the session is successfully
+ // initialized. Returns true if the session was registered, false if there is
+ // already an existing session with the same |session_id|.
+ bool RegisterSession(
+ const std::string& session_id,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> session);
+
+ // Removes a session from the internal map.
+ void UnregisterSession(const std::string& session_id);
+
+ // Initializes a session with the |init_data_type|, |init_data| and
+ // |session_type| provided.
+ void InitializeNewSession(EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ MediaKeys::SessionType session_type,
+ scoped_ptr<NewSessionCdmPromise> promise);
+
+ // Loads the session specified by |session_id|.
+ void LoadSession(MediaKeys::SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<NewSessionCdmPromise> promise);
+
+ // Updates the session specified by |session_id| with |response|.
+ void UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<SimpleCdmPromise> promise);
+
+ // Closes the session specified by |session_id|.
+ void CloseSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise);
+
+ // Removes stored session data associated with the session specified by
+ // |session_id|.
+ void RemoveSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise);
+
+ // Returns the CdmContext associated with |media_keys_|.
+ // TODO(jrummell): Figure out lifetimes, as WMPI may still use the decryptor
+ // after WebContentDecryptionModule is freed. http://crbug.com/330324
+ CdmContext* GetCdmContext();
+
+ // Returns the key system name.
+ const std::string& GetKeySystem() const;
+
+ // Returns a prefix to use for UMAs.
+ const std::string& GetKeySystemUMAPrefix() const;
+
+ private:
+ friend class base::RefCounted<CdmSessionAdapter>;
+
+ // Session ID to WebContentDecryptionModuleSessionImpl mapping.
+ typedef base::hash_map<std::string,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> >
+ SessionMap;
+
+ ~CdmSessionAdapter();
+
+ // Callback for CreateCdm().
+ void OnCdmCreated(const std::string& key_system,
+ blink::WebContentDecryptionModuleResult result,
+ scoped_ptr<MediaKeys> cdm,
+ const std::string& error_message);
+
+ // Callbacks for firing session events.
+ void OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8_t>& message,
+ const GURL& legacy_destination_url);
+ void OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info);
+ void OnSessionExpirationUpdate(const std::string& session_id,
+ const base::Time& new_expiry_time);
+ void OnSessionClosed(const std::string& session_id);
+ void OnLegacySessionError(const std::string& session_id,
+ MediaKeys::Exception exception_code,
+ uint32_t system_code,
+ const std::string& error_message);
+
+ // Helper function of the callbacks.
+ WebContentDecryptionModuleSessionImpl* GetSession(
+ const std::string& session_id);
+
+ scoped_ptr<MediaKeys> cdm_;
+
+ SessionMap sessions_;
+
+ std::string key_system_;
+ std::string key_system_uma_prefix_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<CdmSessionAdapter> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmSessionAdapter);
+};
+
+} // namespace media
+
+#endif // MEDIA_BLINK_CDM_SESSION_ADAPTER_H_
diff --git a/chromium/media/blink/encrypted_media_player_support.cc b/chromium/media/blink/encrypted_media_player_support.cc
index 663141f3ddc..e83efd9a527 100644
--- a/chromium/media/blink/encrypted_media_player_support.cc
+++ b/chromium/media/blink/encrypted_media_player_support.cc
@@ -4,12 +4,325 @@
#include "media/blink/encrypted_media_player_support.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/metrics/histogram.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/key_systems.h"
+#include "media/blink/webcontentdecryptionmodule_impl.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
+#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
+#include "third_party/WebKit/public/web/WebDocument.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
+
+using blink::WebMediaPlayer;
+using blink::WebMediaPlayerClient;
+using blink::WebString;
+
namespace media {
-EncryptedMediaPlayerSupport::EncryptedMediaPlayerSupport() {
+#define BIND_TO_RENDER_LOOP(function) \
+ (BindToCurrentLoop(base::Bind(function, AsWeakPtr())))
+
+#define BIND_TO_RENDER_LOOP1(function, arg1) \
+ (BindToCurrentLoop(base::Bind(function, AsWeakPtr(), arg1)))
+
+// Prefix for histograms related to Encrypted Media Extensions.
+static const char* kMediaEme = "Media.EME.";
+
+// Convert a WebString to ASCII, falling back on an empty string in the case
+// of a non-ASCII string.
+static std::string ToASCIIOrEmpty(const WebString& string) {
+ return base::IsStringASCII(string) ? base::UTF16ToASCII(string)
+ : std::string();
+}
+
+// Helper functions to report media EME related stats to UMA. They follow the
+// convention of more commonly used macros UMA_HISTOGRAM_ENUMERATION and
+// UMA_HISTOGRAM_COUNTS. The reason that we cannot use those macros directly is
+// that UMA_* macros require the names to be constant throughout the process'
+// lifetime.
+static void EmeUMAHistogramEnumeration(const std::string& key_system,
+ const std::string& method,
+ int sample,
+ int boundary_value) {
+ base::LinearHistogram::FactoryGet(
+ kMediaEme + GetKeySystemNameForUMA(key_system) + "." + method,
+ 1, boundary_value, boundary_value + 1,
+ base::Histogram::kUmaTargetedHistogramFlag)->Add(sample);
+}
+
+static void EmeUMAHistogramCounts(const std::string& key_system,
+ const std::string& method,
+ int sample) {
+ // Use the same parameters as UMA_HISTOGRAM_COUNTS.
+ base::Histogram::FactoryGet(
+ kMediaEme + GetKeySystemNameForUMA(key_system) + "." + method,
+ 1, 1000000, 50, base::Histogram::kUmaTargetedHistogramFlag)->Add(sample);
+}
+
+// Helper enum for reporting generateKeyRequest/addKey histograms.
+enum MediaKeyException {
+ kUnknownResultId,
+ kSuccess,
+ kKeySystemNotSupported,
+ kInvalidPlayerState,
+ kMaxMediaKeyException
+};
+
+static MediaKeyException MediaKeyExceptionForUMA(
+ WebMediaPlayer::MediaKeyException e) {
+ switch (e) {
+ case WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported:
+ return kKeySystemNotSupported;
+ case WebMediaPlayer::MediaKeyExceptionInvalidPlayerState:
+ return kInvalidPlayerState;
+ case WebMediaPlayer::MediaKeyExceptionNoError:
+ return kSuccess;
+ default:
+ return kUnknownResultId;
+ }
+}
+
+// Helper for converting |key_system| name and exception |e| to a pair of enum
+// values from above, for reporting to UMA.
+static void ReportMediaKeyExceptionToUMA(const std::string& method,
+ const std::string& key_system,
+ WebMediaPlayer::MediaKeyException e) {
+ MediaKeyException result_id = MediaKeyExceptionForUMA(e);
+ DCHECK_NE(result_id, kUnknownResultId) << e;
+ EmeUMAHistogramEnumeration(
+ key_system, method, result_id, kMaxMediaKeyException);
+}
+
+// Guess the type of |init_data|. This is only used to handle some corner cases
+// so we keep it as simple as possible without breaking major use cases.
+static EmeInitDataType GuessInitDataType(const unsigned char* init_data,
+ unsigned init_data_length) {
+ // Most WebM files use KeyId of 16 bytes. CENC init data is always >16 bytes.
+ if (init_data_length == 16)
+ return EmeInitDataType::WEBM;
+
+ return EmeInitDataType::CENC;
+}
+
+EncryptedMediaPlayerSupport::EncryptedMediaPlayerSupport(
+ CdmFactory* cdm_factory,
+ blink::WebMediaPlayerClient* client,
+ MediaPermission* media_permission,
+ const CdmContextReadyCB& cdm_context_ready_cb)
+ : cdm_factory_(cdm_factory),
+ client_(client),
+ media_permission_(media_permission),
+ init_data_type_(EmeInitDataType::UNKNOWN),
+ cdm_context_ready_cb_(cdm_context_ready_cb) {
}
EncryptedMediaPlayerSupport::~EncryptedMediaPlayerSupport() {
}
+WebMediaPlayer::MediaKeyException
+EncryptedMediaPlayerSupport::GenerateKeyRequest(
+ blink::WebLocalFrame* frame,
+ const WebString& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length) {
+ DVLOG(1) << "generateKeyRequest: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length));
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+
+ WebMediaPlayer::MediaKeyException e = GenerateKeyRequestInternal(
+ frame, ascii_key_system, init_data, init_data_length);
+ ReportMediaKeyExceptionToUMA("generateKeyRequest", ascii_key_system, e);
+ return e;
+}
+
+WebMediaPlayer::MediaKeyException
+EncryptedMediaPlayerSupport::GenerateKeyRequestInternal(
+ blink::WebLocalFrame* frame,
+ const std::string& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length) {
+ if (!PrefixedIsSupportedConcreteKeySystem(key_system))
+ return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
+
+ // |use_hw_secure_codecs| is only supported on Android, and Android (WMPA)
+ // does not use EncryptedMediaPlayerSupport.
+ bool use_hw_secure_codecs = false;
+
+ if (!proxy_decryptor_) {
+ DCHECK(current_key_system_.empty());
+ DCHECK(!cdm_context_ready_cb_.is_null());
+ proxy_decryptor_.reset(new ProxyDecryptor(
+ media_permission_, use_hw_secure_codecs,
+ BIND_TO_RENDER_LOOP(&EncryptedMediaPlayerSupport::OnKeyAdded),
+ BIND_TO_RENDER_LOOP(&EncryptedMediaPlayerSupport::OnKeyError),
+ BIND_TO_RENDER_LOOP(&EncryptedMediaPlayerSupport::OnKeyMessage)));
+
+ GURL security_origin(frame->document().securityOrigin().toString());
+ proxy_decryptor_->CreateCdm(cdm_factory_, key_system, security_origin,
+ cdm_context_ready_cb_);
+ current_key_system_ = key_system;
+ }
+
+ // We do not support run-time switching between key systems for now.
+ DCHECK(!current_key_system_.empty());
+ if (key_system != current_key_system_)
+ return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
+
+ EmeInitDataType init_data_type = init_data_type_;
+ if (init_data_type == EmeInitDataType::UNKNOWN)
+ init_data_type = GuessInitDataType(init_data, init_data_length);
+
+ proxy_decryptor_->GenerateKeyRequest(init_data_type, init_data,
+ init_data_length);
+
+ return WebMediaPlayer::MediaKeyExceptionNoError;
+}
+
+WebMediaPlayer::MediaKeyException EncryptedMediaPlayerSupport::AddKey(
+ const WebString& key_system,
+ const unsigned char* key,
+ unsigned key_length,
+ const unsigned char* init_data,
+ unsigned init_data_length,
+ const WebString& session_id) {
+ DVLOG(1) << "addKey: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(key),
+ static_cast<size_t>(key_length)) << ", "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length)) << " ["
+ << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
+ WebMediaPlayer::MediaKeyException e = AddKeyInternal(ascii_key_system,
+ key,
+ key_length,
+ init_data,
+ init_data_length,
+ ascii_session_id);
+ ReportMediaKeyExceptionToUMA("addKey", ascii_key_system, e);
+ return e;
+}
+
+WebMediaPlayer::MediaKeyException
+EncryptedMediaPlayerSupport::AddKeyInternal(
+ const std::string& key_system,
+ const unsigned char* key,
+ unsigned key_length,
+ const unsigned char* init_data,
+ unsigned init_data_length,
+ const std::string& session_id) {
+ DCHECK(key);
+ DCHECK_GT(key_length, 0u);
+
+ if (!PrefixedIsSupportedConcreteKeySystem(key_system))
+ return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
+
+ if (current_key_system_.empty() || key_system != current_key_system_)
+ return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
+
+ proxy_decryptor_->AddKey(
+ key, key_length, init_data, init_data_length, session_id);
+ return WebMediaPlayer::MediaKeyExceptionNoError;
+}
+
+WebMediaPlayer::MediaKeyException
+EncryptedMediaPlayerSupport::CancelKeyRequest(
+ const WebString& key_system,
+ const WebString& session_id) {
+ DVLOG(1) << "cancelKeyRequest: " << base::string16(key_system) << ": "
+ << " [" << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
+ WebMediaPlayer::MediaKeyException e =
+ CancelKeyRequestInternal(ascii_key_system, ascii_session_id);
+ ReportMediaKeyExceptionToUMA("cancelKeyRequest", ascii_key_system, e);
+ return e;
+}
+
+WebMediaPlayer::MediaKeyException
+EncryptedMediaPlayerSupport::CancelKeyRequestInternal(
+ const std::string& key_system,
+ const std::string& session_id) {
+ if (!PrefixedIsSupportedConcreteKeySystem(key_system))
+ return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
+
+ if (current_key_system_.empty() || key_system != current_key_system_)
+ return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
+
+ proxy_decryptor_->CancelKeyRequest(session_id);
+ return WebMediaPlayer::MediaKeyExceptionNoError;
+}
+
+void EncryptedMediaPlayerSupport::SetInitDataType(
+ EmeInitDataType init_data_type) {
+ DCHECK(init_data_type != EmeInitDataType::UNKNOWN);
+ DLOG_IF(WARNING, init_data_type_ != EmeInitDataType::UNKNOWN &&
+ init_data_type != init_data_type_)
+ << "Mixed init data type not supported. The new type is ignored.";
+ if (init_data_type_ == EmeInitDataType::UNKNOWN)
+ init_data_type_ = init_data_type;
+}
+
+void EncryptedMediaPlayerSupport::OnPipelineDecryptError() {
+ EmeUMAHistogramCounts(current_key_system_, "DecryptError", 1);
+}
+
+void EncryptedMediaPlayerSupport::OnKeyAdded(const std::string& session_id) {
+ EmeUMAHistogramCounts(current_key_system_, "KeyAdded", 1);
+ client_->keyAdded(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id));
+}
+
+void EncryptedMediaPlayerSupport::OnKeyError(const std::string& session_id,
+ MediaKeys::KeyError error_code,
+ uint32 system_code) {
+ EmeUMAHistogramEnumeration(current_key_system_, "KeyError",
+ error_code, MediaKeys::kMaxKeyError);
+
+ uint16 short_system_code = 0;
+ if (system_code > std::numeric_limits<uint16>::max()) {
+ LOG(WARNING) << "system_code exceeds unsigned short limit.";
+ short_system_code = std::numeric_limits<uint16>::max();
+ } else {
+ short_system_code = static_cast<uint16>(system_code);
+ }
+
+ client_->keyError(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id),
+ static_cast<WebMediaPlayerClient::MediaKeyErrorCode>(error_code),
+ short_system_code);
+}
+
+void EncryptedMediaPlayerSupport::OnKeyMessage(
+ const std::string& session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
+ DCHECK(destination_url.is_empty() || destination_url.is_valid());
+
+ client_->keyMessage(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id),
+ message.empty() ? NULL : &message[0],
+ base::saturated_cast<unsigned int>(message.size()),
+ destination_url);
+}
+
} // namespace media
diff --git a/chromium/media/blink/encrypted_media_player_support.h b/chromium/media/blink/encrypted_media_player_support.h
index 737720d2f2c..d3fd454b662 100644
--- a/chromium/media/blink/encrypted_media_player_support.h
+++ b/chromium/media/blink/encrypted_media_player_support.h
@@ -5,14 +5,22 @@
#ifndef MEDIA_BLINK_ENCRYPTED_MEDIA_PLAYER_SUPPORT_H_
#define MEDIA_BLINK_ENCRYPTED_MEDIA_PLAYER_SUPPORT_H_
-#include "media/base/decryptor.h"
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/cdm_context.h"
+#include "media/base/cdm_factory.h"
#include "media/base/demuxer.h"
-#include "media/base/media_export.h"
+#include "media/base/eme_constants.h"
+#include "media/cdm/proxy_decryptor.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
#include "third_party/WebKit/public/platform/WebMediaPlayer.h"
namespace blink {
class WebContentDecryptionModule;
-class WebContentDecryptionModuleResult;
class WebLocalFrame;
class WebMediaPlayerClient;
class WebString;
@@ -20,58 +28,92 @@ class WebString;
namespace media {
-class MEDIA_EXPORT EncryptedMediaPlayerSupport {
+class MediaPermission;
+class WebContentDecryptionModuleImpl;
+
+// Provides support to prefixed EME implementation.
+// Do NOT add unprefixed EME functionality to this class!
+// TODO(xhwang): When deprecating prefixed EME support, drop this whole file.
+class EncryptedMediaPlayerSupport
+ : public base::SupportsWeakPtr<EncryptedMediaPlayerSupport> {
public:
- EncryptedMediaPlayerSupport();
- virtual ~EncryptedMediaPlayerSupport();
+ using CdmContextReadyCB = ProxyDecryptor::CdmContextReadyCB;
+
+ // |cdm_context_ready_cb| is called when the CDM instance creation completes.
+ EncryptedMediaPlayerSupport(CdmFactory* cdm_factory,
+ blink::WebMediaPlayerClient* client,
+ MediaPermission* media_permission,
+ const CdmContextReadyCB& cdm_context_ready_cb);
+ ~EncryptedMediaPlayerSupport();
- // Prefixed API methods.
- virtual blink::WebMediaPlayer::MediaKeyException GenerateKeyRequest(
+ blink::WebMediaPlayer::MediaKeyException GenerateKeyRequest(
blink::WebLocalFrame* frame,
const blink::WebString& key_system,
const unsigned char* init_data,
- unsigned init_data_length) = 0;
+ unsigned init_data_length);
- virtual blink::WebMediaPlayer::MediaKeyException AddKey(
+ blink::WebMediaPlayer::MediaKeyException AddKey(
const blink::WebString& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const blink::WebString& session_id) = 0;
+ const blink::WebString& session_id);
- virtual blink::WebMediaPlayer::MediaKeyException CancelKeyRequest(
+ blink::WebMediaPlayer::MediaKeyException CancelKeyRequest(
const blink::WebString& key_system,
- const blink::WebString& session_id) = 0;
+ const blink::WebString& session_id);
+
+ void SetInitDataType(EmeInitDataType init_data_type);
+
+ void OnPipelineDecryptError();
+
+ private:
+ blink::WebMediaPlayer::MediaKeyException GenerateKeyRequestInternal(
+ blink::WebLocalFrame* frame,
+ const std::string& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length);
+ blink::WebMediaPlayer::MediaKeyException AddKeyInternal(
+ const std::string& key_system,
+ const unsigned char* key,
+ unsigned key_length,
+ const unsigned char* init_data,
+ unsigned init_data_length,
+ const std::string& session_id);
- // Unprefixed API methods.
- virtual void SetInitialContentDecryptionModule(
- blink::WebContentDecryptionModule* initial_cdm) = 0;
- virtual void SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm) = 0;
- virtual void SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm,
- blink::WebContentDecryptionModuleResult result) = 0;
+ blink::WebMediaPlayer::MediaKeyException CancelKeyRequestInternal(
+ const std::string& key_system,
+ const std::string& session_id);
+ void OnKeyAdded(const std::string& session_id);
+ void OnKeyError(const std::string& session_id,
+ MediaKeys::KeyError error_code,
+ uint32 system_code);
+ void OnKeyMessage(const std::string& session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url);
- // Callback factory and notification methods used by WebMediaPlayerImpl.
+ CdmFactory* cdm_factory_;
- // Creates a callback that Demuxers can use to signal that the content
- // requires a key. This method make sure the callback returned can be safely
- // invoked from any thread.
- virtual Demuxer::NeedKeyCB CreateNeedKeyCB() = 0;
+ blink::WebMediaPlayerClient* client_;
- // Creates a callback that renderers can use to set decryptor
- // ready callback. This method make sure the callback returned can be safely
- // invoked from any thread.
- virtual SetDecryptorReadyCB CreateSetDecryptorReadyCB() = 0;
+ MediaPermission* media_permission_;
- // Called to inform this object that the media pipeline encountered
- // and handled a decryption error.
- virtual void OnPipelineDecryptError() = 0;
+ // The currently selected key system. Empty string means that no key system
+ // has been selected.
+ std::string current_key_system_;
+
+ // We assume all streams are from the same container, thus have the same
+ // init data type.
+ EmeInitDataType init_data_type_;
+
+ CdmContextReadyCB cdm_context_ready_cb_;
+
+ // Manages decryption keys and decrypts encrypted frames.
+ scoped_ptr<ProxyDecryptor> proxy_decryptor_;
- private:
DISALLOW_COPY_AND_ASSIGN(EncryptedMediaPlayerSupport);
};
diff --git a/chromium/media/blink/key_system_config_selector.cc b/chromium/media/blink/key_system_config_selector.cc
new file mode 100644
index 00000000000..e8f994fd7a8
--- /dev/null
+++ b/chromium/media/blink/key_system_config_selector.cc
@@ -0,0 +1,782 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "key_system_config_selector.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/cdm_config.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_permission.h"
+#include "media/blink/webmediaplayer_util.h"
+#include "net/base/mime_util.h"
+#include "third_party/WebKit/public/platform/WebMediaKeySystemConfiguration.h"
+#include "third_party/WebKit/public/platform/WebSecurityOrigin.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/platform/WebVector.h"
+#include "url/gurl.h"
+
+namespace media {
+
+using EmeFeatureRequirement =
+ blink::WebMediaKeySystemConfiguration::Requirement;
+
+namespace {
+
+static EmeConfigRule GetSessionTypeConfigRule(EmeSessionTypeSupport support) {
+ switch (support) {
+ case EmeSessionTypeSupport::INVALID:
+ NOTREACHED();
+ return EmeConfigRule::NOT_SUPPORTED;
+ case EmeSessionTypeSupport::NOT_SUPPORTED:
+ return EmeConfigRule::NOT_SUPPORTED;
+ case EmeSessionTypeSupport::SUPPORTED_WITH_IDENTIFIER:
+ return EmeConfigRule::IDENTIFIER_AND_PERSISTENCE_REQUIRED;
+ case EmeSessionTypeSupport::SUPPORTED:
+ return EmeConfigRule::PERSISTENCE_REQUIRED;
+ }
+ NOTREACHED();
+ return EmeConfigRule::NOT_SUPPORTED;
+}
+
+static EmeConfigRule GetDistinctiveIdentifierConfigRule(
+ EmeFeatureSupport support,
+ EmeFeatureRequirement requirement) {
+ if (support == EmeFeatureSupport::INVALID) {
+ NOTREACHED();
+ return EmeConfigRule::NOT_SUPPORTED;
+ }
+
+ // For NOT_ALLOWED and REQUIRED, the result is as expected. For OPTIONAL, we
+ // return the most restrictive rule that is not more restrictive than for
+ // NOT_ALLOWED or REQUIRED. Those values will be checked individually when
+ // the option is resolved.
+ //
+ // NOT_ALLOWED OPTIONAL REQUIRED
+ // NOT_SUPPORTED I_NOT_ALLOWED I_NOT_ALLOWED NOT_SUPPORTED
+ // REQUESTABLE I_NOT_ALLOWED SUPPORTED I_REQUIRED
+ // ALWAYS_ENABLED NOT_SUPPORTED I_REQUIRED I_REQUIRED
+ DCHECK(support == EmeFeatureSupport::NOT_SUPPORTED ||
+ support == EmeFeatureSupport::REQUESTABLE ||
+ support == EmeFeatureSupport::ALWAYS_ENABLED);
+ DCHECK(requirement == EmeFeatureRequirement::NotAllowed ||
+ requirement == EmeFeatureRequirement::Optional ||
+ requirement == EmeFeatureRequirement::Required);
+ if ((support == EmeFeatureSupport::NOT_SUPPORTED &&
+ requirement == EmeFeatureRequirement::Required) ||
+ (support == EmeFeatureSupport::ALWAYS_ENABLED &&
+ requirement == EmeFeatureRequirement::NotAllowed)) {
+ return EmeConfigRule::NOT_SUPPORTED;
+ }
+ if (support == EmeFeatureSupport::REQUESTABLE &&
+ requirement == EmeFeatureRequirement::Optional) {
+ return EmeConfigRule::SUPPORTED;
+ }
+ if (support == EmeFeatureSupport::NOT_SUPPORTED ||
+ requirement == EmeFeatureRequirement::NotAllowed) {
+ return EmeConfigRule::IDENTIFIER_NOT_ALLOWED;
+ }
+ return EmeConfigRule::IDENTIFIER_REQUIRED;
+}
+
+static EmeConfigRule GetPersistentStateConfigRule(
+ EmeFeatureSupport support,
+ EmeFeatureRequirement requirement) {
+ if (support == EmeFeatureSupport::INVALID) {
+ NOTREACHED();
+ return EmeConfigRule::NOT_SUPPORTED;
+ }
+
+ // For NOT_ALLOWED and REQUIRED, the result is as expected. For OPTIONAL, we
+ // return the most restrictive rule that is not more restrictive than for
+ // NOT_ALLOWED or REQUIRED. Those values will be checked individually when
+ // the option is resolved.
+ //
+ // Note that even though a distinctive identifier can not be required for
+ // persistent state, it may still be required for persistent sessions.
+ //
+ // NOT_ALLOWED OPTIONAL REQUIRED
+ // NOT_SUPPORTED P_NOT_ALLOWED P_NOT_ALLOWED NOT_SUPPORTED
+ // REQUESTABLE P_NOT_ALLOWED SUPPORTED P_REQUIRED
+ // ALWAYS_ENABLED NOT_SUPPORTED P_REQUIRED P_REQUIRED
+ DCHECK(support == EmeFeatureSupport::NOT_SUPPORTED ||
+ support == EmeFeatureSupport::REQUESTABLE ||
+ support == EmeFeatureSupport::ALWAYS_ENABLED);
+ DCHECK(requirement == EmeFeatureRequirement::NotAllowed ||
+ requirement == EmeFeatureRequirement::Optional ||
+ requirement == EmeFeatureRequirement::Required);
+ if ((support == EmeFeatureSupport::NOT_SUPPORTED &&
+ requirement == EmeFeatureRequirement::Required) ||
+ (support == EmeFeatureSupport::ALWAYS_ENABLED &&
+ requirement == EmeFeatureRequirement::NotAllowed)) {
+ return EmeConfigRule::NOT_SUPPORTED;
+ }
+ if (support == EmeFeatureSupport::REQUESTABLE &&
+ requirement == EmeFeatureRequirement::Optional) {
+ return EmeConfigRule::SUPPORTED;
+ }
+ if (support == EmeFeatureSupport::NOT_SUPPORTED ||
+ requirement == EmeFeatureRequirement::NotAllowed) {
+ return EmeConfigRule::PERSISTENCE_NOT_ALLOWED;
+ }
+ return EmeConfigRule::PERSISTENCE_REQUIRED;
+}
+
+} // namespace
+
+struct KeySystemConfigSelector::SelectionRequest {
+ std::string key_system;
+ blink::WebVector<blink::WebMediaKeySystemConfiguration>
+ candidate_configurations;
+ blink::WebSecurityOrigin security_origin;
+ base::Callback<void(const blink::WebMediaKeySystemConfiguration&,
+ const CdmConfig&)> succeeded_cb;
+ base::Callback<void(const blink::WebString&)> not_supported_cb;
+ bool was_permission_requested = false;
+ bool is_permission_granted = false;
+ bool are_secure_codecs_supported = false;
+};
+
+// Accumulates configuration rules to determine if a feature (additional
+// configuration rule) can be added to an accumulated configuration.
+class KeySystemConfigSelector::ConfigState {
+ public:
+ ConfigState(bool was_permission_requested, bool is_permission_granted)
+ : was_permission_requested_(was_permission_requested),
+ is_permission_granted_(is_permission_granted) {}
+
+ bool IsPermissionGranted() const { return is_permission_granted_; }
+
+ // Permission is possible if it has not been denied.
+ bool IsPermissionPossible() const {
+ return is_permission_granted_ || !was_permission_requested_;
+ }
+
+ bool IsIdentifierRequired() const { return is_identifier_required_; }
+
+ bool IsIdentifierRecommended() const { return is_identifier_recommended_; }
+
+ bool AreHwSecureCodecsRequired() const {
+ return are_hw_secure_codecs_required_;
+ }
+
+ // Checks whether a rule is compatible with all previously added rules.
+ bool IsRuleSupported(EmeConfigRule rule) const {
+ switch (rule) {
+ case EmeConfigRule::NOT_SUPPORTED:
+ return false;
+ case EmeConfigRule::IDENTIFIER_NOT_ALLOWED:
+ return !is_identifier_required_;
+ case EmeConfigRule::IDENTIFIER_REQUIRED:
+ // TODO(sandersd): Confirm if we should be refusing these rules when
+ // permission has been denied (as the spec currently says).
+ return !is_identifier_not_allowed_ && IsPermissionPossible();
+ case EmeConfigRule::IDENTIFIER_RECOMMENDED:
+ return true;
+ case EmeConfigRule::PERSISTENCE_NOT_ALLOWED:
+ return !is_persistence_required_;
+ case EmeConfigRule::PERSISTENCE_REQUIRED:
+ return !is_persistence_not_allowed_;
+ case EmeConfigRule::IDENTIFIER_AND_PERSISTENCE_REQUIRED:
+ return (!is_identifier_not_allowed_ && IsPermissionPossible() &&
+ !is_persistence_not_allowed_);
+ case EmeConfigRule::HW_SECURE_CODECS_NOT_ALLOWED:
+ return !are_hw_secure_codecs_required_;
+ case EmeConfigRule::HW_SECURE_CODECS_REQUIRED:
+ return !are_hw_secure_codecs_not_allowed_;
+ case EmeConfigRule::SUPPORTED:
+ return true;
+ }
+ NOTREACHED();
+ return false;
+ }
+
+ // Add a rule to the accumulated configuration state.
+ void AddRule(EmeConfigRule rule) {
+ DCHECK(IsRuleSupported(rule));
+ switch (rule) {
+ case EmeConfigRule::NOT_SUPPORTED:
+ NOTREACHED();
+ return;
+ case EmeConfigRule::IDENTIFIER_NOT_ALLOWED:
+ is_identifier_not_allowed_ = true;
+ return;
+ case EmeConfigRule::IDENTIFIER_REQUIRED:
+ is_identifier_required_ = true;
+ return;
+ case EmeConfigRule::IDENTIFIER_RECOMMENDED:
+ is_identifier_recommended_ = true;
+ return;
+ case EmeConfigRule::PERSISTENCE_NOT_ALLOWED:
+ is_persistence_not_allowed_ = true;
+ return;
+ case EmeConfigRule::PERSISTENCE_REQUIRED:
+ is_persistence_required_ = true;
+ return;
+ case EmeConfigRule::IDENTIFIER_AND_PERSISTENCE_REQUIRED:
+ is_identifier_required_ = true;
+ is_persistence_required_ = true;
+ return;
+ case EmeConfigRule::HW_SECURE_CODECS_NOT_ALLOWED:
+ are_hw_secure_codecs_not_allowed_ = true;
+ return;
+ case EmeConfigRule::HW_SECURE_CODECS_REQUIRED:
+ are_hw_secure_codecs_required_ = true;
+ return;
+ case EmeConfigRule::SUPPORTED:
+ return;
+ }
+ NOTREACHED();
+ }
+
+ private:
+  // NOTE(review): the two permission flags below have no in-class
+  // initializers; presumably they are set by the constructor (not visible in
+  // this hunk) — confirm against the class definition.
+
+  // Whether permission to use a distinctive identifier was requested. If set,
+  // |is_permission_granted_| represents the final decision.
+  // (Not changed by adding rules.)
+  bool was_permission_requested_;
+
+  // Whether permission to use a distinctive identifier has been granted.
+  // (Not changed by adding rules.)
+  bool is_permission_granted_;
+
+  // Whether a rule has been added that requires or blocks a distinctive
+  // identifier.
+  bool is_identifier_required_ = false;
+  bool is_identifier_not_allowed_ = false;
+
+  // Whether a rule has been added that recommends a distinctive identifier.
+  bool is_identifier_recommended_ = false;
+
+  // Whether a rule has been added that requires or blocks persistent state.
+  bool is_persistence_required_ = false;
+  bool is_persistence_not_allowed_ = false;
+
+  // Whether a rule has been added that requires or blocks hardware-secure
+  // codecs.
+  bool are_hw_secure_codecs_required_ = false;
+  bool are_hw_secure_codecs_not_allowed_ = false;
+};
+
+// Both |key_systems| and |media_permission| must be non-null (DCHECKed).
+// They are retained as raw pointers, so the caller must keep them alive for
+// the lifetime of this selector.
+KeySystemConfigSelector::KeySystemConfigSelector(
+    const KeySystems* key_systems,
+    MediaPermission* media_permission)
+    : key_systems_(key_systems),
+      media_permission_(media_permission),
+      weak_factory_(this) {
+  DCHECK(key_systems_);
+  DCHECK(media_permission_);
+}
+
+// Destruction invalidates |weak_factory_| pointers, so any pending permission
+// callback bound in SelectConfigInternal() is dropped rather than run.
+KeySystemConfigSelector::~KeySystemConfigSelector() {
+}
+
+// Returns whether |container_mime_type| and |codecs| are supported for
+// |media_type| by both Chrome and the CDM for |key_system|. On success the
+// CDM's content-type rule is added to |config_state|; on failure
+// |config_state| is left unchanged.
+bool KeySystemConfigSelector::IsSupportedContentType(
+    const std::string& key_system,
+    EmeMediaType media_type,
+    const std::string& container_mime_type,
+    const std::string& codecs,
+    KeySystemConfigSelector::ConfigState* config_state) {
+  // TODO(sandersd): Move contentType parsing from Blink to here so that invalid
+  // parameters can be rejected. http://crbug.com/417561
+  // MIME types are case-insensitive; the net:: checks expect lowercase.
+  std::string container_lower = base::StringToLowerASCII(container_mime_type);
+
+  // Check that |container_mime_type| is supported by Chrome.
+  if (!net::IsSupportedMediaMimeType(container_lower))
+    return false;
+
+  // Check that |codecs| are supported by Chrome. This is done primarily to
+  // validate extended codecs, but it also ensures that the CDM cannot support
+  // codecs that Chrome does not (which could complicate the robustness
+  // algorithm).
+  std::vector<std::string> codec_vector;
+  net::ParseCodecString(codecs, &codec_vector, false);
+  if (!codec_vector.empty() &&
+      (net::IsSupportedStrictMediaMimeType(container_lower, codec_vector) !=
+       net::IsSupported)) {
+    return false;
+  }
+
+  // Check that |container_mime_type| and |codecs| are supported by the CDM.
+  // This check does not handle extended codecs, so extended codec information
+  // is stripped (extended codec information was checked above).
+  std::vector<std::string> stripped_codec_vector;
+  net::ParseCodecString(codecs, &stripped_codec_vector, true);
+  EmeConfigRule codecs_rule = key_systems_->GetContentTypeConfigRule(
+      key_system, media_type, container_lower, stripped_codec_vector);
+  if (!config_state->IsRuleSupported(codecs_rule))
+    return false;
+  config_state->AddRule(codecs_rule);
+
+  return true;
+}
+
+// Implements the EME "Get Supported Capabilities for Media Type" algorithm.
+// Returns false if no requested capability is supported (or any contentType
+// is empty); otherwise appends the supported subset to
+// |supported_media_capabilities| and folds the corresponding rules into
+// |config_state|. Unsupported capabilities are skipped, not errors.
+bool KeySystemConfigSelector::GetSupportedCapabilities(
+    const std::string& key_system,
+    EmeMediaType media_type,
+    const blink::WebVector<blink::WebMediaKeySystemMediaCapability>&
+        requested_media_capabilities,
+    KeySystemConfigSelector::ConfigState* config_state,
+    std::vector<blink::WebMediaKeySystemMediaCapability>*
+        supported_media_capabilities) {
+  // From
+  // https://w3c.github.io/encrypted-media/#get-supported-capabilities-for-media-type
+  // 1. Let local accumulated capabilities be a local copy of partial
+  //    configuration.
+  //    (Skipped as we directly update |config_state|. This is safe because we
+  //    only do so when at least one requested media capability is supported.)
+  // 2. Let supported media capabilities be empty.
+  DCHECK_EQ(supported_media_capabilities->size(), 0ul);
+  // 3. For each value in requested media capabilities:
+  for (size_t i = 0; i < requested_media_capabilities.size(); i++) {
+    // 3.1. Let contentType be the value's contentType member.
+    // 3.2. Let robustness be the value's robustness member.
+    const blink::WebMediaKeySystemMediaCapability& capability =
+        requested_media_capabilities[i];
+    // 3.3. If contentType is the empty string, return null.
+    if (capability.mimeType.isEmpty()) {
+      DVLOG(2) << "Rejecting requested configuration because "
+               << "a capability contentType was empty.";
+      return false;
+    }
+
+    // 3.4-3.11. (Implemented by IsSupportedContentType().)
+    // Rules are added to a local copy so that a rejected capability leaves
+    // |config_state| untouched.
+    ConfigState proposed_config_state = *config_state;
+    if (!base::IsStringASCII(capability.mimeType) ||
+        !base::IsStringASCII(capability.codecs) ||
+        !IsSupportedContentType(key_system, media_type,
+                                base::UTF16ToASCII(capability.mimeType),
+                                base::UTF16ToASCII(capability.codecs),
+                                &proposed_config_state)) {
+      continue;
+    }
+    // 3.12. If robustness is not the empty string, run the following steps:
+    if (!capability.robustness.isEmpty()) {
+      // 3.12.1. If robustness is an unrecognized value or not supported by
+      //         implementation, continue to the next iteration. String
+      //         comparison is case-sensitive.
+      if (!base::IsStringASCII(capability.robustness))
+        continue;
+      EmeConfigRule robustness_rule = key_systems_->GetRobustnessConfigRule(
+          key_system, media_type, base::UTF16ToASCII(capability.robustness));
+      if (!proposed_config_state.IsRuleSupported(robustness_rule))
+        continue;
+      proposed_config_state.AddRule(robustness_rule);
+      // 3.12.2. Add robustness to configuration.
+      //         (It's already added, we use capability as configuration.)
+    }
+    // 3.13. If the user agent and implementation do not support playback of
+    //       encrypted media data as specified by configuration, including all
+    //       media types, in combination with local accumulated capabilities,
+    //       continue to the next iteration.
+    //       (This is handled when adding rules to |proposed_config_state|.)
+    // 3.14. Add configuration to supported media capabilities.
+    supported_media_capabilities->push_back(capability);
+    // 3.15. Add configuration to local accumulated capabilities.
+    *config_state = proposed_config_state;
+  }
+  // 4. If supported media capabilities is empty, return null.
+  if (supported_media_capabilities->empty()) {
+    DVLOG(2) << "Rejecting requested configuration because "
+             << "no capabilities were supported.";
+    return false;
+  }
+  // 5. Return media type capabilities.
+  return true;
+}
+
+// Implements https://w3c.github.io/encrypted-media/#get-supported-configuration
+// for a single |candidate|. On CONFIGURATION_SUPPORTED,
+// |accumulated_configuration| holds the resolved configuration and
+// |config_state| holds the accumulated rule state. May instead return
+// CONFIGURATION_REQUIRES_PERMISSION, in which case the caller must obtain
+// distinctive-identifier permission and retry.
+KeySystemConfigSelector::ConfigurationSupport
+KeySystemConfigSelector::GetSupportedConfiguration(
+    const std::string& key_system,
+    const blink::WebMediaKeySystemConfiguration& candidate,
+    ConfigState* config_state,
+    blink::WebMediaKeySystemConfiguration* accumulated_configuration) {
+  // From https://w3c.github.io/encrypted-media/#get-supported-configuration
+  // 1. Let accumulated configuration be empty. (Done by caller.)
+  // 2. If the initDataTypes member is present in candidate configuration, run
+  //    the following steps:
+  if (candidate.hasInitDataTypes) {
+    // 2.1. Let supported types be empty.
+    std::vector<blink::WebEncryptedMediaInitDataType> supported_types;
+
+    // 2.2. For each value in candidate configuration's initDataTypes member:
+    for (size_t i = 0; i < candidate.initDataTypes.size(); i++) {
+      // 2.2.1. Let initDataType be the value.
+      blink::WebEncryptedMediaInitDataType init_data_type =
+          candidate.initDataTypes[i];
+      // 2.2.2. If the implementation supports generating requests based on
+      //        initDataType, add initDataType to supported types. String
+      //        comparison is case-sensitive. The empty string is never
+      //        supported.
+      if (key_systems_->IsSupportedInitDataType(
+              key_system, ConvertToEmeInitDataType(init_data_type))) {
+        supported_types.push_back(init_data_type);
+      }
+    }
+
+    // 2.3. If supported types is empty, return null.
+    if (supported_types.empty()) {
+      DVLOG(2) << "Rejecting requested configuration because "
+               << "no initDataType values were supported.";
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+
+    // 2.4. Add supported types to accumulated configuration.
+    accumulated_configuration->initDataTypes = supported_types;
+  }
+
+  // 3. Follow the steps for the value of candidate configuration's
+  //    distinctiveIdentifier member from the following list:
+  //      - "required": If the implementation does not support a persistent
+  //        Distinctive Identifier in combination with accumulated
+  //        configuration, return null.
+  //      - "optional": Continue.
+  //      - "not-allowed": If the implementation requires a Distinctive
+  //        Identifier in combination with accumulated configuration, return
+  //        null.
+  // We also reject OPTIONAL when distinctive identifiers are ALWAYS_ENABLED and
+  // permission has already been denied. This would happen anyway at step 11.
+  EmeConfigRule di_rule = GetDistinctiveIdentifierConfigRule(
+      key_systems_->GetDistinctiveIdentifierSupport(key_system),
+      candidate.distinctiveIdentifier);
+  if (!config_state->IsRuleSupported(di_rule)) {
+    DVLOG(2) << "Rejecting requested configuration because "
+             << "the distinctiveIdentifier requirement was not supported.";
+    return CONFIGURATION_NOT_SUPPORTED;
+  }
+  config_state->AddRule(di_rule);
+
+  // 4. Add the value of the candidate configuration's distinctiveIdentifier
+  //    member to accumulated configuration.
+  accumulated_configuration->distinctiveIdentifier =
+      candidate.distinctiveIdentifier;
+
+  // 5. Follow the steps for the value of candidate configuration's
+  //    persistentState member from the following list:
+  //      - "required": If the implementation does not support persisting state
+  //        in combination with accumulated configuration, return null.
+  //      - "optional": Continue.
+  //      - "not-allowed": If the implementation requires persisting state in
+  //        combination with accumulated configuration, return null.
+  EmeConfigRule ps_rule = GetPersistentStateConfigRule(
+      key_systems_->GetPersistentStateSupport(key_system),
+      candidate.persistentState);
+  if (!config_state->IsRuleSupported(ps_rule)) {
+    DVLOG(2) << "Rejecting requested configuration because "
+             << "the persistentState requirement was not supported.";
+    return CONFIGURATION_NOT_SUPPORTED;
+  }
+  config_state->AddRule(ps_rule);
+
+  // 6. Add the value of the candidate configuration's persistentState
+  //    member to accumulated configuration.
+  accumulated_configuration->persistentState = candidate.persistentState;
+
+  // 7. Follow the steps for the first matching condition from the following
+  //    list:
+  //      - If the sessionTypes member is present in candidate configuration,
+  //        let session types be candidate configuration's sessionTypes member.
+  //      - Otherwise, let session types be [ "temporary" ].
+  blink::WebVector<blink::WebEncryptedMediaSessionType> session_types;
+  if (candidate.hasSessionTypes) {
+    session_types = candidate.sessionTypes;
+  } else {
+    std::vector<blink::WebEncryptedMediaSessionType> temporary(1);
+    temporary[0] = blink::WebEncryptedMediaSessionType::Temporary;
+    session_types = temporary;
+  }
+
+  // 8. For each value in session types:
+  for (size_t i = 0; i < session_types.size(); i++) {
+    // 8.1. Let session type be the value.
+    blink::WebEncryptedMediaSessionType session_type = session_types[i];
+    // 8.2. If the implementation does not support session type in combination
+    //      with accumulated configuration, return null.
+    // 8.3. If session type is "persistent-license" or
+    //      "persistent-release-message", follow the steps for accumulated
+    //      configuration's persistentState value from the following list:
+    //        - "required": Continue.
+    //        - "optional": Change accumulated configuration's persistentState
+    //          value to "required".
+    //        - "not-allowed": Return null.
+    EmeConfigRule session_type_rule = EmeConfigRule::NOT_SUPPORTED;
+    switch (session_type) {
+      case blink::WebEncryptedMediaSessionType::Unknown:
+        DVLOG(2) << "Rejecting requested configuration because "
+                 << "a required session type was not recognized.";
+        return CONFIGURATION_NOT_SUPPORTED;
+      case blink::WebEncryptedMediaSessionType::Temporary:
+        session_type_rule = EmeConfigRule::SUPPORTED;
+        break;
+      case blink::WebEncryptedMediaSessionType::PersistentLicense:
+        session_type_rule = GetSessionTypeConfigRule(
+            key_systems_->GetPersistentLicenseSessionSupport(key_system));
+        break;
+      case blink::WebEncryptedMediaSessionType::PersistentReleaseMessage:
+        session_type_rule = GetSessionTypeConfigRule(
+            key_systems_->GetPersistentReleaseMessageSessionSupport(
+                key_system));
+        break;
+    }
+    if (!config_state->IsRuleSupported(session_type_rule)) {
+      DVLOG(2) << "Rejecting requested configuration because "
+               << "a required session type was not supported.";
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+    config_state->AddRule(session_type_rule);
+  }
+
+  // 9. Add session types to accumulated configuration.
+  accumulated_configuration->sessionTypes = session_types;
+
+  // 10. If the videoCapabilities member is present in candidate configuration:
+  if (candidate.hasVideoCapabilities) {
+    // 10.1. Let video capabilities be the result of executing the Get Supported
+    //       Capabilities for Media Type algorithm on Video, candidate
+    //       configuration's videoCapabilities member, and accumulated
+    //       configuration.
+    // 10.2. If video capabilities is null, return null.
+    std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities;
+    if (!GetSupportedCapabilities(key_system, EmeMediaType::VIDEO,
+                                  candidate.videoCapabilities, config_state,
+                                  &video_capabilities)) {
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+
+    // 10.3. Add video capabilities to accumulated configuration.
+    accumulated_configuration->videoCapabilities = video_capabilities;
+  }
+
+  // 11. If the audioCapabilities member is present in candidate configuration:
+  if (candidate.hasAudioCapabilities) {
+    // 11.1. Let audio capabilities be the result of executing the Get Supported
+    //       Capabilities for Media Type algorithm on Audio, candidate
+    //       configuration's audioCapabilities member, and accumulated
+    //       configuration.
+    // 11.2. If audio capabilities is null, return null.
+    std::vector<blink::WebMediaKeySystemMediaCapability> audio_capabilities;
+    if (!GetSupportedCapabilities(key_system, EmeMediaType::AUDIO,
+                                  candidate.audioCapabilities, config_state,
+                                  &audio_capabilities)) {
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+
+    // 11.3. Add audio capabilities to accumulated configuration.
+    accumulated_configuration->audioCapabilities = audio_capabilities;
+  }
+
+  // 12. If accumulated configuration's distinctiveIdentifier value is
+  //     "optional", follow the steps for the first matching condition from the
+  //     following list:
+  //       - If the implementation requires a Distinctive Identifier for any of
+  //         the combinations in accumulated configuration, change accumulated
+  //         configuration's distinctiveIdentifier value to "required".
+  //       - Otherwise, change accumulated configuration's distinctiveIdentifier
+  //         value to "not-allowed".
+  if (accumulated_configuration->distinctiveIdentifier ==
+      blink::WebMediaKeySystemConfiguration::Requirement::Optional) {
+    EmeConfigRule not_allowed_rule = GetDistinctiveIdentifierConfigRule(
+        key_systems_->GetDistinctiveIdentifierSupport(key_system),
+        EmeFeatureRequirement::NotAllowed);
+    EmeConfigRule required_rule = GetDistinctiveIdentifierConfigRule(
+        key_systems_->GetDistinctiveIdentifierSupport(key_system),
+        EmeFeatureRequirement::Required);
+    bool not_allowed_supported =
+        config_state->IsRuleSupported(not_allowed_rule);
+    bool required_supported = config_state->IsRuleSupported(required_rule);
+    // If a distinctive identifier is recommended and that is a possible
+    // outcome, prefer that.
+    if (required_supported && config_state->IsIdentifierRecommended() &&
+        config_state->IsPermissionPossible()) {
+      not_allowed_supported = false;
+    }
+    if (not_allowed_supported) {
+      accumulated_configuration->distinctiveIdentifier =
+          blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed;
+      config_state->AddRule(not_allowed_rule);
+    } else if (required_supported) {
+      accumulated_configuration->distinctiveIdentifier =
+          blink::WebMediaKeySystemConfiguration::Requirement::Required;
+      config_state->AddRule(required_rule);
+    } else {
+      // We should not have passed step 3.
+      NOTREACHED();
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+  }
+
+  // 13. If accumulated configuration's persistentState value is "optional",
+  //     follow the steps for the first matching condition from the following
+  //     list:
+  //       - If the implementation requires persisting state for any of the
+  //         combinations in accumulated configuration, change accumulated
+  //         configuration's persistentState value to "required".
+  //       - Otherwise, change accumulated configuration's persistentState value
+  //         to "not-allowed".
+  if (accumulated_configuration->persistentState ==
+      blink::WebMediaKeySystemConfiguration::Requirement::Optional) {
+    EmeConfigRule not_allowed_rule = GetPersistentStateConfigRule(
+        key_systems_->GetPersistentStateSupport(key_system),
+        EmeFeatureRequirement::NotAllowed);
+    EmeConfigRule required_rule = GetPersistentStateConfigRule(
+        key_systems_->GetPersistentStateSupport(key_system),
+        EmeFeatureRequirement::Required);
+    // |distinctiveIdentifier| should not be affected after it is decided.
+    DCHECK(not_allowed_rule == EmeConfigRule::NOT_SUPPORTED ||
+           not_allowed_rule == EmeConfigRule::PERSISTENCE_NOT_ALLOWED);
+    DCHECK(required_rule == EmeConfigRule::NOT_SUPPORTED ||
+           required_rule == EmeConfigRule::PERSISTENCE_REQUIRED);
+    bool not_allowed_supported =
+        config_state->IsRuleSupported(not_allowed_rule);
+    bool required_supported = config_state->IsRuleSupported(required_rule);
+    if (not_allowed_supported) {
+      accumulated_configuration->persistentState =
+          blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed;
+      config_state->AddRule(not_allowed_rule);
+    } else if (required_supported) {
+      accumulated_configuration->persistentState =
+          blink::WebMediaKeySystemConfiguration::Requirement::Required;
+      config_state->AddRule(required_rule);
+    } else {
+      // We should not have passed step 5.
+      NOTREACHED();
+      return CONFIGURATION_NOT_SUPPORTED;
+    }
+  }
+
+  // 14. If implementation in the configuration specified by the combination of
+  //     the values in accumulated configuration is not supported or not allowed
+  //     in the origin, return null.
+  // 15. If accumulated configuration's distinctiveIdentifier value is
+  //     "required", [prompt the user for consent].
+  if (accumulated_configuration->distinctiveIdentifier ==
+      blink::WebMediaKeySystemConfiguration::Requirement::Required) {
+    // The caller is responsible for resolving what to do if permission is
+    // required but has been denied (it should treat it as NOT_SUPPORTED).
+    if (!config_state->IsPermissionGranted())
+      return CONFIGURATION_REQUIRES_PERMISSION;
+  }
+
+  // 16. If the label member is present in candidate configuration, add the
+  //     value of the candidate configuration's label member to accumulated
+  //     configuration.
+  accumulated_configuration->label = candidate.label;
+
+  // 17. Return accumulated configuration.
+  return CONFIGURATION_SUPPORTED;
+}
+
+// Entry point: runs the configuration-selection portion (step 7) of
+// navigator.requestMediaKeySystemAccess(). Eventually invokes exactly one of
+// |succeeded_cb| or |not_supported_cb|, possibly asynchronously if a
+// permission request is needed; if this selector is destroyed first, neither
+// callback runs (see |weak_factory_|).
+void KeySystemConfigSelector::SelectConfig(
+    const blink::WebString& key_system,
+    const blink::WebVector<blink::WebMediaKeySystemConfiguration>&
+        candidate_configurations,
+    const blink::WebSecurityOrigin& security_origin,
+    bool are_secure_codecs_supported,
+    base::Callback<void(const blink::WebMediaKeySystemConfiguration&,
+                        const CdmConfig&)> succeeded_cb,
+    base::Callback<void(const blink::WebString&)> not_supported_cb) {
+  // Continued from requestMediaKeySystemAccess(), step 7, from
+  // https://w3c.github.io/encrypted-media/#requestmediakeysystemaccess
+  //
+  // 7.1. If keySystem is not one of the Key Systems supported by the user
+  //      agent, reject promise with a new DOMException whose name is
+  //      NotSupportedError. String comparison is case-sensitive.
+  if (!base::IsStringASCII(key_system)) {
+    not_supported_cb.Run("Only ASCII keySystems are supported");
+    return;
+  }
+
+  std::string key_system_ascii = base::UTF16ToASCII(key_system);
+  if (!key_systems_->IsSupportedKeySystem(key_system_ascii)) {
+    not_supported_cb.Run("Unsupported keySystem");
+    return;
+  }
+
+  // 7.2-7.4. Implemented by OnSelectConfig().
+  // TODO(sandersd): This should be async, ideally not on the main thread.
+  // The request is heap-allocated so it can survive an asynchronous
+  // permission round-trip (see SelectConfigInternal()).
+  scoped_ptr<SelectionRequest> request(new SelectionRequest());
+  request->key_system = key_system_ascii;
+  request->candidate_configurations = candidate_configurations;
+  request->security_origin = security_origin;
+  request->are_secure_codecs_supported = are_secure_codecs_supported;
+  request->succeeded_cb = succeeded_cb;
+  request->not_supported_cb = not_supported_cb;
+  SelectConfigInternal(request.Pass());
+}
+
+// Evaluates each candidate configuration in order. Runs |succeeded_cb| with
+// the first supported configuration, or |not_supported_cb| if none is
+// supported. If a configuration needs distinctive-identifier permission that
+// has not yet been requested, issues an asynchronous permission request and
+// returns; OnPermissionResult() re-enters this method with the outcome.
+void KeySystemConfigSelector::SelectConfigInternal(
+    scoped_ptr<SelectionRequest> request) {
+  // Continued from requestMediaKeySystemAccess(), step 7.1, from
+  // https://w3c.github.io/encrypted-media/#requestmediakeysystemaccess
+  //
+  // 7.2. Let implementation be the implementation of keySystem.
+  //      (|key_systems_| fills this role.)
+  // 7.3. For each value in supportedConfigurations:
+  for (size_t i = 0; i < request->candidate_configurations.size(); i++) {
+    // 7.3.1. Let candidate configuration be the value.
+    // 7.3.2. Let supported configuration be the result of executing the Get
+    //        Supported Configuration algorithm on implementation, candidate
+    //        configuration, and origin.
+    // 7.3.3. If supported configuration is not null, [initialize and return a
+    //        new MediaKeySystemAccess object.]
+    // A fresh ConfigState per candidate: rules accumulated for one candidate
+    // must not leak into the evaluation of the next.
+    ConfigState config_state(request->was_permission_requested,
+                             request->is_permission_granted);
+    DCHECK(config_state.IsRuleSupported(
+        EmeConfigRule::HW_SECURE_CODECS_NOT_ALLOWED));
+    if (!request->are_secure_codecs_supported)
+      config_state.AddRule(EmeConfigRule::HW_SECURE_CODECS_NOT_ALLOWED);
+    blink::WebMediaKeySystemConfiguration accumulated_configuration;
+    CdmConfig cdm_config;
+    ConfigurationSupport support = GetSupportedConfiguration(
+        request->key_system, request->candidate_configurations[i],
+        &config_state, &accumulated_configuration);
+    switch (support) {
+      case CONFIGURATION_NOT_SUPPORTED:
+        continue;
+      case CONFIGURATION_REQUIRES_PERMISSION:
+        if (request->was_permission_requested) {
+          // Permission was already requested and denied; do not ask again.
+          DVLOG(2) << "Rejecting requested configuration because "
+                   << "permission was denied.";
+          continue;
+        }
+        {
+          // Note: the GURL must not be constructed inline because
+          // base::Passed(&request) sets |request| to null.
+          GURL security_origin(request->security_origin.toString());
+          media_permission_->RequestPermission(
+              MediaPermission::PROTECTED_MEDIA_IDENTIFIER, security_origin,
+              base::Bind(&KeySystemConfigSelector::OnPermissionResult,
+                         weak_factory_.GetWeakPtr(), base::Passed(&request)));
+        }
+        return;
+      case CONFIGURATION_SUPPORTED:
+        cdm_config.allow_distinctive_identifier =
+            (accumulated_configuration.distinctiveIdentifier ==
+             blink::WebMediaKeySystemConfiguration::Requirement::Required);
+        cdm_config.allow_persistent_state =
+            (accumulated_configuration.persistentState ==
+             blink::WebMediaKeySystemConfiguration::Requirement::Required);
+        cdm_config.use_hw_secure_codecs =
+            config_state.AreHwSecureCodecsRequired();
+        request->succeeded_cb.Run(accumulated_configuration, cdm_config);
+        return;
+    }
+  }
+
+  // 7.4. Reject promise with a new DOMException whose name is
+  //      NotSupportedError.
+  request->not_supported_cb.Run(
+      "None of the requested configurations were supported.");
+}
+
+// Completion callback for the permission request issued by
+// SelectConfigInternal(). Records the outcome on |request| (marking the
+// permission as requested so it is not asked for again) and re-runs the
+// selection from the beginning.
+void KeySystemConfigSelector::OnPermissionResult(
+    scoped_ptr<SelectionRequest> request,
+    bool is_permission_granted) {
+  request->was_permission_requested = true;
+  request->is_permission_granted = is_permission_granted;
+  SelectConfigInternal(request.Pass());
+}
+
+} // namespace media
diff --git a/chromium/media/blink/key_system_config_selector.h b/chromium/media/blink/key_system_config_selector.h
new file mode 100644
index 00000000000..7a8afbf0dab
--- /dev/null
+++ b/chromium/media/blink/key_system_config_selector.h
@@ -0,0 +1,97 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_KEY_SYSTEM_CONFIG_SELECTOR_H_
+#define MEDIA_BLINK_KEY_SYSTEM_CONFIG_SELECTOR_H_
+
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/eme_constants.h"
+#include "media/base/media_export.h"
+#include "third_party/WebKit/public/platform/WebVector.h"
+
+namespace blink {
+
+struct WebMediaKeySystemConfiguration;
+struct WebMediaKeySystemMediaCapability;
+class WebSecurityOrigin;
+class WebString;
+
+} // namespace blink
+
+namespace media {
+
+struct CdmConfig;
+class KeySystems;
+class MediaPermission;
+
+// Selects a supported WebMediaKeySystemConfiguration (if any) from a list of
+// candidates, implementing the configuration-selection steps of
+// navigator.requestMediaKeySystemAccess() (EME). Capabilities are checked
+// against |key_systems|; |media_permission| is consulted when a configuration
+// requires distinctive-identifier consent.
+class MEDIA_EXPORT KeySystemConfigSelector {
+ public:
+  // |key_systems| and |media_permission| must be non-null and are retained as
+  // raw pointers; they must outlive this selector.
+  KeySystemConfigSelector(
+      const KeySystems* key_systems,
+      MediaPermission* media_permission);
+
+  ~KeySystemConfigSelector();
+
+  // Evaluates |candidate_configurations| for |key_system| and reports the
+  // result via exactly one of |succeeded_cb| (with the accumulated
+  // configuration and resulting CdmConfig) or |not_supported_cb| (with a
+  // reason string). May complete asynchronously if a permission request is
+  // needed.
+  void SelectConfig(
+      const blink::WebString& key_system,
+      const blink::WebVector<blink::WebMediaKeySystemConfiguration>&
+          candidate_configurations,
+      const blink::WebSecurityOrigin& security_origin,
+      bool are_secure_codecs_supported,
+      base::Callback<void(const blink::WebMediaKeySystemConfiguration&,
+                          const CdmConfig&)> succeeded_cb,
+      base::Callback<void(const blink::WebString&)> not_supported_cb);
+
+ private:
+  struct SelectionRequest;
+  class ConfigState;
+
+  // Outcome of evaluating a single candidate configuration.
+  enum ConfigurationSupport {
+    CONFIGURATION_NOT_SUPPORTED,
+    CONFIGURATION_REQUIRES_PERMISSION,
+    CONFIGURATION_SUPPORTED,
+  };
+
+  void SelectConfigInternal(scoped_ptr<SelectionRequest> request);
+
+  void OnPermissionResult(scoped_ptr<SelectionRequest> request,
+                          bool is_permission_granted);
+
+  ConfigurationSupport GetSupportedConfiguration(
+      const std::string& key_system,
+      const blink::WebMediaKeySystemConfiguration& candidate,
+      ConfigState* config_state,
+      blink::WebMediaKeySystemConfiguration* accumulated_configuration);
+
+  bool GetSupportedCapabilities(
+      const std::string& key_system,
+      EmeMediaType media_type,
+      const blink::WebVector<blink::WebMediaKeySystemMediaCapability>&
+          requested_media_capabilities,
+      ConfigState* config_state,
+      std::vector<blink::WebMediaKeySystemMediaCapability>*
+          supported_media_capabilities);
+
+  bool IsSupportedContentType(const std::string& key_system,
+                              EmeMediaType media_type,
+                              const std::string& container_mime_type,
+                              const std::string& codecs,
+                              ConfigState* config_state);
+
+  // Not owned; see constructor comment.
+  const KeySystems* key_systems_;
+  MediaPermission* media_permission_;
+  // Invalidates pending permission callbacks on destruction.
+  base::WeakPtrFactory<KeySystemConfigSelector> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(KeySystemConfigSelector);
+};
+
+} // namespace media
+
+#endif // MEDIA_BLINK_KEY_SYSTEM_CONFIG_SELECTOR_H_
diff --git a/chromium/media/blink/key_system_config_selector_unittest.cc b/chromium/media/blink/key_system_config_selector_unittest.cc
new file mode 100644
index 00000000000..70b890d8f6d
--- /dev/null
+++ b/chromium/media/blink/key_system_config_selector_unittest.cc
@@ -0,0 +1,767 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/base/eme_constants.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_permission.h"
+#include "media/blink/key_system_config_selector.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaTypes.h"
+#include "third_party/WebKit/public/platform/WebMediaKeySystemConfiguration.h"
+#include "third_party/WebKit/public/platform/WebSecurityOrigin.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "url/gurl.h"
+
+namespace media {
+
+namespace {
+
+const char kSupported[] = "supported";
+const char kRecommendIdentifier[] = "recommend_identifier";
+const char kRequireIdentifier[] = "require_identifier";
+const char kUnsupported[] = "unsupported";
+
+// TODO(sandersd): Audio and video variants (doesn't matter for now because the
+// checks are in KeySystems).
+const char kSupportedContainer[] = "video/webm";
+const char kUnsupportedContainer[] = "video/foo";
+
+// TODO(sandersd): Extended codec variants (requires proprietary codec support).
+const char kSupportedCodec[] = "vp8";
+const char kUnsupportedCodec[] = "foo";
+const char kUnsupportedCodecs[] = "vp8,foo";
+const char kSupportedCodecs[] = "vp8,vp8";
+
+const char kDefaultSecurityOrigin[] = "https://example.com/";
+
+class FakeKeySystems : public KeySystems {
+ public:
+ ~FakeKeySystems() override {
+ }
+
+ bool IsSupportedKeySystem(const std::string& key_system) const override {
+ if (key_system == kSupported)
+ return true;
+ return false;
+ }
+
+ // TODO(sandersd): Move implementation into KeySystemConfigSelector?
+ bool IsSupportedInitDataType(const std::string& key_system,
+ EmeInitDataType init_data_type) const override {
+ switch (init_data_type) {
+ case EmeInitDataType::UNKNOWN:
+ return false;
+ case EmeInitDataType::WEBM:
+ return (init_data_types & kInitDataTypeMaskWebM) != 0;
+ case EmeInitDataType::CENC:
+ return (init_data_types & kInitDataTypeMaskCenc) != 0;
+ case EmeInitDataType::KEYIDS:
+ return (init_data_types & kInitDataTypeMaskKeyIds) != 0;
+ }
+ NOTREACHED();
+ return false;
+ }
+
+ // TODO(sandersd): Secure codec simulation.
+ EmeConfigRule GetContentTypeConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& container_mime_type,
+ const std::vector<std::string>& codecs) const override {
+ if (container_mime_type == kUnsupportedContainer)
+ return EmeConfigRule::NOT_SUPPORTED;
+ DCHECK_EQ(kSupportedContainer, container_mime_type);
+ for (const std::string& codec : codecs) {
+ if (codec == kUnsupportedCodec)
+ return EmeConfigRule::NOT_SUPPORTED;
+ DCHECK_EQ(kSupportedCodec, codec);
+ }
+ return EmeConfigRule::SUPPORTED;
+ }
+
+ EmeConfigRule GetRobustnessConfigRule(
+ const std::string& key_system,
+ EmeMediaType media_type,
+ const std::string& requested_robustness) const override {
+ if (requested_robustness == kUnsupported)
+ return EmeConfigRule::NOT_SUPPORTED;
+ if (requested_robustness == kRequireIdentifier)
+ return EmeConfigRule::IDENTIFIER_REQUIRED;
+ if (requested_robustness == kRecommendIdentifier)
+ return EmeConfigRule::IDENTIFIER_RECOMMENDED;
+ if (requested_robustness == kSupported)
+ return EmeConfigRule::SUPPORTED;
+ NOTREACHED();
+ return EmeConfigRule::NOT_SUPPORTED;
+ }
+
+ EmeSessionTypeSupport GetPersistentLicenseSessionSupport(
+ const std::string& key_system) const override {
+ return persistent_license;
+ }
+
+ EmeSessionTypeSupport GetPersistentReleaseMessageSessionSupport(
+ const std::string& key_system) const override {
+ return persistent_release_message;
+ }
+
+ EmeFeatureSupport GetPersistentStateSupport(
+ const std::string& key_system) const override {
+ return persistent_state;
+ }
+
+ EmeFeatureSupport GetDistinctiveIdentifierSupport(
+ const std::string& key_system) const override {
+ return distinctive_identifier;
+ }
+
+ InitDataTypeMask init_data_types = kInitDataTypeMaskNone;
+
+ // INVALID so that they must be set in any test that needs them.
+ EmeSessionTypeSupport persistent_license = EmeSessionTypeSupport::INVALID;
+ EmeSessionTypeSupport persistent_release_message =
+ EmeSessionTypeSupport::INVALID;
+
+ // Every test implicitly requires these, so they must be set. They are set to
+ // values that are likely to cause tests to fail if they are accidentally
+ // depended on. Test cases explicitly depending on them should set them, as
+ // the default values may be changed.
+ EmeFeatureSupport persistent_state = EmeFeatureSupport::NOT_SUPPORTED;
+ EmeFeatureSupport distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+};
+
+class FakeMediaPermission : public MediaPermission {
+ public:
+ void HasPermission(Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) override {
+ permission_status_cb.Run(is_granted);
+ }
+
+ void RequestPermission(
+ Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) override {
+ requests++;
+ permission_status_cb.Run(is_granted);
+ }
+
+ int requests = 0;
+ bool is_granted = false;
+};
+
+} // namespace
+
+class KeySystemConfigSelectorTest : public testing::Test {
+ public:
+ KeySystemConfigSelectorTest()
+ : key_systems_(new FakeKeySystems()),
+ media_permission_(new FakeMediaPermission()) {}
+
+ void SelectConfig() {
+ media_permission_->requests = 0;
+ succeeded_count_ = 0;
+ not_supported_count_ = 0;
+ KeySystemConfigSelector(key_systems_.get(), media_permission_.get())
+ .SelectConfig(key_system_, configs_, security_origin_, false,
+ base::Bind(&KeySystemConfigSelectorTest::OnSucceeded,
+ base::Unretained(this)),
+ base::Bind(&KeySystemConfigSelectorTest::OnNotSupported,
+ base::Unretained(this)));
+ }
+
+ bool SelectConfigReturnsConfig() {
+ SelectConfig();
+ EXPECT_EQ(0, media_permission_->requests);
+ EXPECT_EQ(1, succeeded_count_);
+ EXPECT_EQ(0, not_supported_count_);
+ return (succeeded_count_ != 0);
+ }
+
+ bool SelectConfigReturnsError() {
+ SelectConfig();
+ EXPECT_EQ(0, media_permission_->requests);
+ EXPECT_EQ(0, succeeded_count_);
+ EXPECT_EQ(1, not_supported_count_);
+ return (not_supported_count_ != 0);
+ }
+
+ bool SelectConfigRequestsPermissionAndReturnsConfig() {
+ SelectConfig();
+ EXPECT_EQ(1, media_permission_->requests);
+ EXPECT_EQ(1, succeeded_count_);
+ EXPECT_EQ(0, not_supported_count_);
+ return (media_permission_->requests != 0 && succeeded_count_ != 0);
+ }
+
+ bool SelectConfigRequestsPermissionAndReturnsError() {
+ SelectConfig();
+ EXPECT_EQ(1, media_permission_->requests);
+ EXPECT_EQ(0, succeeded_count_);
+ EXPECT_EQ(1, not_supported_count_);
+ return (media_permission_->requests != 0 && not_supported_count_ != 0);
+ }
+
+ void OnSucceeded(const blink::WebMediaKeySystemConfiguration& result,
+ const CdmConfig& cdm_config) {
+ succeeded_count_++;
+ config_ = result;
+ }
+
+ void OnNotSupported(const blink::WebString&) { not_supported_count_++; }
+
+ scoped_ptr<FakeKeySystems> key_systems_;
+ scoped_ptr<FakeMediaPermission> media_permission_;
+
+ // Held values for the call to SelectConfig().
+ blink::WebString key_system_ = blink::WebString::fromUTF8(kSupported);
+ std::vector<blink::WebMediaKeySystemConfiguration> configs_;
+ blink::WebSecurityOrigin security_origin_ =
+ blink::WebSecurityOrigin::createFromString(kDefaultSecurityOrigin);
+
+ // Holds the last successful accumulated configuration.
+ blink::WebMediaKeySystemConfiguration config_;
+
+ int succeeded_count_;
+ int not_supported_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeySystemConfigSelectorTest);
+};
+
+// --- Basics ---
+
+TEST_F(KeySystemConfigSelectorTest, NoConfigs) {
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+// Most of the tests below assume that an empty config is valid.
+TEST_F(KeySystemConfigSelectorTest, EmptyConfig) {
+ configs_.push_back(blink::WebMediaKeySystemConfiguration());
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_TRUE(config_.label.isNull());
+ EXPECT_TRUE(config_.initDataTypes.isEmpty());
+ EXPECT_TRUE(config_.audioCapabilities.isEmpty());
+ EXPECT_TRUE(config_.videoCapabilities.isEmpty());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed,
+ config_.distinctiveIdentifier);
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed,
+ config_.persistentState);
+ ASSERT_EQ(1u, config_.sessionTypes.size());
+ EXPECT_EQ(blink::WebEncryptedMediaSessionType::Temporary,
+ config_.sessionTypes[0]);
+}
+
+TEST_F(KeySystemConfigSelectorTest, Label) {
+ blink::WebMediaKeySystemConfiguration config;
+ config.label = "foo";
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_EQ("foo", config_.label);
+}
+
+// --- keySystem ---
+// Empty is not tested because the empty check is in Blink.
+
+TEST_F(KeySystemConfigSelectorTest, KeySystem_NonAscii) {
+ key_system_ = "\xde\xad\xbe\xef";
+ configs_.push_back(blink::WebMediaKeySystemConfiguration());
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, KeySystem_Unsupported) {
+ key_system_ = kUnsupported;
+ configs_.push_back(blink::WebMediaKeySystemConfiguration());
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+// --- initDataTypes ---
+
+TEST_F(KeySystemConfigSelectorTest, InitDataTypes_Empty) {
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasInitDataTypes = true;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, InitDataTypes_NoneSupported) {
+ key_systems_->init_data_types = kInitDataTypeMaskWebM;
+
+ std::vector<blink::WebEncryptedMediaInitDataType> init_data_types;
+ init_data_types.push_back(blink::WebEncryptedMediaInitDataType::Unknown);
+ init_data_types.push_back(blink::WebEncryptedMediaInitDataType::Cenc);
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasInitDataTypes = true;
+ config.initDataTypes = init_data_types;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, InitDataTypes_SubsetSupported) {
+ key_systems_->init_data_types = kInitDataTypeMaskWebM;
+
+ std::vector<blink::WebEncryptedMediaInitDataType> init_data_types;
+ init_data_types.push_back(blink::WebEncryptedMediaInitDataType::Unknown);
+ init_data_types.push_back(blink::WebEncryptedMediaInitDataType::Cenc);
+ init_data_types.push_back(blink::WebEncryptedMediaInitDataType::Webm);
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasInitDataTypes = true;
+ config.initDataTypes = init_data_types;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(1u, config_.initDataTypes.size());
+ EXPECT_EQ(blink::WebEncryptedMediaInitDataType::Webm,
+ config_.initDataTypes[0]);
+}
+
+// --- distinctiveIdentifier ---
+
+TEST_F(KeySystemConfigSelectorTest, DistinctiveIdentifier_Default) {
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed,
+ config_.distinctiveIdentifier);
+}
+
+TEST_F(KeySystemConfigSelectorTest, DistinctiveIdentifier_Forced) {
+ media_permission_->is_granted = true;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::ALWAYS_ENABLED;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.distinctiveIdentifier);
+}
+
+TEST_F(KeySystemConfigSelectorTest, DistinctiveIdentifier_Blocked) {
+ key_systems_->distinctive_identifier = EmeFeatureSupport::NOT_SUPPORTED;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Required;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, DistinctiveIdentifier_RequestsPermission) {
+ media_permission_->is_granted = true;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Required;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.distinctiveIdentifier);
+}
+
+TEST_F(KeySystemConfigSelectorTest, DistinctiveIdentifier_RespectsPermission) {
+ media_permission_->is_granted = false;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Required;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsError());
+}
+
+// --- persistentState ---
+
+TEST_F(KeySystemConfigSelectorTest, PersistentState_Default) {
+ key_systems_->persistent_state = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.persistentState =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed,
+ config_.persistentState);
+}
+
+TEST_F(KeySystemConfigSelectorTest, PersistentState_Forced) {
+ key_systems_->persistent_state = EmeFeatureSupport::ALWAYS_ENABLED;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.persistentState =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.persistentState);
+}
+
+TEST_F(KeySystemConfigSelectorTest, PersistentState_Blocked) {
+ key_systems_->persistent_state = EmeFeatureSupport::ALWAYS_ENABLED;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.persistentState =
+ blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+// --- sessionTypes ---
+
+TEST_F(KeySystemConfigSelectorTest, SessionTypes_Empty) {
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasSessionTypes = true;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_TRUE(config_.sessionTypes.isEmpty());
+}
+
+TEST_F(KeySystemConfigSelectorTest, SessionTypes_SubsetSupported) {
+ // Allow persistent state, as it would be required to be successful.
+ key_systems_->persistent_state = EmeFeatureSupport::REQUESTABLE;
+ key_systems_->persistent_license = EmeSessionTypeSupport::NOT_SUPPORTED;
+
+ std::vector<blink::WebEncryptedMediaSessionType> session_types;
+ session_types.push_back(blink::WebEncryptedMediaSessionType::Temporary);
+ session_types.push_back(
+ blink::WebEncryptedMediaSessionType::PersistentLicense);
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasSessionTypes = true;
+ config.sessionTypes = session_types;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, SessionTypes_AllSupported) {
+ // Allow persistent state, and expect it to be required.
+ key_systems_->persistent_state = EmeFeatureSupport::REQUESTABLE;
+ key_systems_->persistent_license = EmeSessionTypeSupport::SUPPORTED;
+
+ std::vector<blink::WebEncryptedMediaSessionType> session_types;
+ session_types.push_back(blink::WebEncryptedMediaSessionType::Temporary);
+ session_types.push_back(
+ blink::WebEncryptedMediaSessionType::PersistentLicense);
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.persistentState =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ config.hasSessionTypes = true;
+ config.sessionTypes = session_types;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.persistentState);
+ ASSERT_EQ(2u, config_.sessionTypes.size());
+ EXPECT_EQ(blink::WebEncryptedMediaSessionType::Temporary,
+ config_.sessionTypes[0]);
+ EXPECT_EQ(blink::WebEncryptedMediaSessionType::PersistentLicense,
+ config_.sessionTypes[1]);
+}
+
+TEST_F(KeySystemConfigSelectorTest, SessionTypes_PermissionCanBeRequired) {
+ media_permission_->is_granted = true;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+ key_systems_->persistent_state = EmeFeatureSupport::REQUESTABLE;
+ key_systems_->persistent_license =
+ EmeSessionTypeSupport::SUPPORTED_WITH_IDENTIFIER;
+
+ std::vector<blink::WebEncryptedMediaSessionType> session_types;
+ session_types.push_back(
+ blink::WebEncryptedMediaSessionType::PersistentLicense);
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ config.persistentState =
+ blink::WebMediaKeySystemConfiguration::Requirement::Optional;
+ config.hasSessionTypes = true;
+ config.sessionTypes = session_types;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.distinctiveIdentifier);
+}
+
+// --- videoCapabilities ---
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_Empty) {
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_NoneSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(2);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kUnsupportedContainer;
+ video_capabilities[1].contentType = "b";
+ video_capabilities[1].mimeType = kSupportedContainer;
+ video_capabilities[1].codecs = kUnsupportedCodec;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_SubsetSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(2);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kUnsupportedContainer;
+ video_capabilities[1].contentType = "b";
+ video_capabilities[1].mimeType = kSupportedContainer;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(1u, config_.videoCapabilities.size());
+ EXPECT_EQ("b", config_.videoCapabilities[0].contentType);
+ EXPECT_EQ(kSupportedContainer, config_.videoCapabilities[0].mimeType);
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_AllSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(2);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[1].contentType = "b";
+ video_capabilities[1].mimeType = kSupportedContainer;
+ video_capabilities[1].codecs = kSupportedCodecs;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(2u, config_.videoCapabilities.size());
+ EXPECT_EQ("a", config_.videoCapabilities[0].contentType);
+ EXPECT_EQ("b", config_.videoCapabilities[1].contentType);
+}
+
+TEST_F(KeySystemConfigSelectorTest,
+ VideoCapabilities_Codecs_SubsetSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].codecs = kUnsupportedCodecs;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_Codecs_AllSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].codecs = kSupportedCodecs;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(1u, config_.videoCapabilities.size());
+ EXPECT_EQ(kSupportedCodecs, config_.videoCapabilities[0].codecs);
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_Robustness_Supported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].robustness = kSupported;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(1u, config_.videoCapabilities.size());
+ EXPECT_EQ(kSupported, config_.videoCapabilities[0].robustness);
+}
+
+TEST_F(KeySystemConfigSelectorTest, VideoCapabilities_Robustness_Unsupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].robustness = kUnsupported;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsError());
+}
+
+TEST_F(KeySystemConfigSelectorTest,
+ VideoCapabilities_Robustness_PermissionCanBeRequired) {
+ media_permission_->is_granted = true;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].robustness = kRequireIdentifier;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::Required,
+ config_.distinctiveIdentifier);
+}
+
+TEST_F(KeySystemConfigSelectorTest,
+ VideoCapabilities_Robustness_PermissionCanBeRecommended) {
+ media_permission_->is_granted = false;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ std::vector<blink::WebMediaKeySystemMediaCapability> video_capabilities(1);
+ video_capabilities[0].contentType = "a";
+ video_capabilities[0].mimeType = kSupportedContainer;
+ video_capabilities[0].robustness = kRecommendIdentifier;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasVideoCapabilities = true;
+ config.videoCapabilities = video_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ EXPECT_EQ(blink::WebMediaKeySystemConfiguration::Requirement::NotAllowed,
+ config_.distinctiveIdentifier);
+}
+
+// --- audioCapabilities ---
+// These are handled by the same code as |videoCapabilities|, so only minimal
+// additional testing is done.
+
+TEST_F(KeySystemConfigSelectorTest, AudioCapabilities_SubsetSupported) {
+ std::vector<blink::WebMediaKeySystemMediaCapability> audio_capabilities(2);
+ audio_capabilities[0].contentType = "a";
+ audio_capabilities[0].mimeType = kUnsupportedContainer;
+ audio_capabilities[1].contentType = "b";
+ audio_capabilities[1].mimeType = kSupportedContainer;
+
+ blink::WebMediaKeySystemConfiguration config;
+ config.hasAudioCapabilities = true;
+ config.audioCapabilities = audio_capabilities;
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ(1u, config_.audioCapabilities.size());
+ EXPECT_EQ("b", config_.audioCapabilities[0].contentType);
+ EXPECT_EQ(kSupportedContainer, config_.audioCapabilities[0].mimeType);
+}
+
+// --- Multiple configurations ---
+
+TEST_F(KeySystemConfigSelectorTest, Configurations_AllSupported) {
+ blink::WebMediaKeySystemConfiguration config;
+ config.label = "a";
+ configs_.push_back(config);
+ config.label = "b";
+ configs_.push_back(config);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ("a", config_.label);
+}
+
+TEST_F(KeySystemConfigSelectorTest, Configurations_SubsetSupported) {
+ blink::WebMediaKeySystemConfiguration config1;
+ config1.label = "a";
+ config1.hasInitDataTypes = true;
+ configs_.push_back(config1);
+
+ blink::WebMediaKeySystemConfiguration config2;
+ config2.label = "b";
+ configs_.push_back(config2);
+
+ ASSERT_TRUE(SelectConfigReturnsConfig());
+ ASSERT_EQ("b", config_.label);
+}
+
+TEST_F(KeySystemConfigSelectorTest,
+ Configurations_FirstRequiresPermission_Allowed) {
+ media_permission_->is_granted = true;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config1;
+ config1.label = "a";
+ config1.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Required;
+ configs_.push_back(config1);
+
+ blink::WebMediaKeySystemConfiguration config2;
+ config2.label = "b";
+ configs_.push_back(config2);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ ASSERT_EQ("a", config_.label);
+}
+
+TEST_F(KeySystemConfigSelectorTest,
+ Configurations_FirstRequiresPermission_Rejected) {
+ media_permission_->is_granted = false;
+ key_systems_->distinctive_identifier = EmeFeatureSupport::REQUESTABLE;
+
+ blink::WebMediaKeySystemConfiguration config1;
+ config1.label = "a";
+ config1.distinctiveIdentifier =
+ blink::WebMediaKeySystemConfiguration::Requirement::Required;
+ configs_.push_back(config1);
+
+ blink::WebMediaKeySystemConfiguration config2;
+ config2.label = "b";
+ configs_.push_back(config2);
+
+ ASSERT_TRUE(SelectConfigRequestsPermissionAndReturnsConfig());
+ ASSERT_EQ("b", config_.label);
+}
+
+} // namespace media
diff --git a/chromium/media/blink/media_blink.gyp b/chromium/media/blink/media_blink.gyp
index ca92dce2c61..3462a382c99 100644
--- a/chromium/media/blink/media_blink.gyp
+++ b/chromium/media/blink/media_blink.gyp
@@ -12,8 +12,10 @@
'../../base/base.gyp:base',
'../../cc/cc.gyp:cc',
'../../cc/blink/cc_blink.gyp:cc_blink',
+ '../../gpu/blink/gpu_blink.gyp:gpu_blink',
'../../ui/gfx/gfx.gyp:gfx_geometry',
'../../net/net.gyp:net',
+ '../../skia/skia.gyp:skia',
'../../third_party/WebKit/public/blink.gyp:blink',
'../media.gyp:media',
'../media.gyp:shared_memory_support',
@@ -37,18 +39,28 @@
'cdm_result_promise.h',
'cdm_result_promise_helper.cc',
'cdm_result_promise_helper.h',
+ 'cdm_session_adapter.cc',
+ 'cdm_session_adapter.h',
'encrypted_media_player_support.cc',
'encrypted_media_player_support.h',
+ 'key_system_config_selector.cc',
+ 'key_system_config_selector.h',
'new_session_cdm_result_promise.cc',
'new_session_cdm_result_promise.h',
- 'null_encrypted_media_player_support.cc',
- 'null_encrypted_media_player_support.h',
'texttrack_impl.cc',
'texttrack_impl.h',
'video_frame_compositor.cc',
'video_frame_compositor.h',
'webaudiosourceprovider_impl.cc',
'webaudiosourceprovider_impl.h',
+ 'webcontentdecryptionmodule_impl.cc',
+ 'webcontentdecryptionmodule_impl.h',
+ 'webcontentdecryptionmoduleaccess_impl.cc',
+ 'webcontentdecryptionmoduleaccess_impl.h',
+ 'webcontentdecryptionmodulesession_impl.cc',
+ 'webcontentdecryptionmodulesession_impl.h',
+ 'webencryptedmediaclient_impl.cc',
+ 'webencryptedmediaclient_impl.h',
'webinbandtexttrack_impl.cc',
'webinbandtexttrack_impl.h',
'webmediaplayer_delegate.h',
@@ -66,6 +78,8 @@
'conditions': [
['OS=="android"', {
'sources!': [
+ 'encrypted_media_player_support.cc',
+ 'encrypted_media_player_support.h',
'webmediaplayer_impl.cc',
'webmediaplayer_impl.h',
],
@@ -83,6 +97,7 @@
'../../base/base.gyp:test_support_base',
'../../cc/cc.gyp:cc',
'../../cc/blink/cc_blink.gyp:cc_blink',
+ '../../gin/gin.gyp:gin',
'../../net/net.gyp:net',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
@@ -97,6 +112,7 @@
'buffered_data_source_unittest.cc',
'buffered_resource_loader_unittest.cc',
'cache_util_unittest.cc',
+ 'key_system_config_selector_unittest.cc',
'mock_webframeclient.h',
'mock_weburlloader.cc',
'mock_weburlloader.h',
diff --git a/chromium/media/blink/new_session_cdm_result_promise.cc b/chromium/media/blink/new_session_cdm_result_promise.cc
index 6ac5247a78c..0a07614dc0a 100644
--- a/chromium/media/blink/new_session_cdm_result_promise.cc
+++ b/chromium/media/blink/new_session_cdm_result_promise.cc
@@ -22,11 +22,11 @@ NewSessionCdmResultPromise::NewSessionCdmResultPromise(
NewSessionCdmResultPromise::~NewSessionCdmResultPromise() {
}
-void NewSessionCdmResultPromise::resolve(const std::string& web_session_id) {
+void NewSessionCdmResultPromise::resolve(const std::string& session_id) {
MarkPromiseSettled();
ReportCdmResultUMA(uma_name_, SUCCESS);
blink::WebContentDecryptionModuleResult::SessionStatus status =
- new_session_created_cb_.Run(web_session_id);
+ new_session_created_cb_.Run(session_id);
web_cdm_result_.completeWithSession(status);
}
diff --git a/chromium/media/blink/new_session_cdm_result_promise.h b/chromium/media/blink/new_session_cdm_result_promise.h
index 7ed6ec109ba..c4b657da122 100644
--- a/chromium/media/blink/new_session_cdm_result_promise.h
+++ b/chromium/media/blink/new_session_cdm_result_promise.h
@@ -16,7 +16,7 @@
namespace media {
typedef base::Callback<blink::WebContentDecryptionModuleResult::SessionStatus(
- const std::string& web_session_id)> SessionInitializedCB;
+ const std::string& session_id)> SessionInitializedCB;
// Special class for resolving a new session promise. Resolving a new session
// promise returns the session ID (as a string), but the blink promise needs
@@ -32,7 +32,7 @@ class MEDIA_EXPORT NewSessionCdmResultPromise
~NewSessionCdmResultPromise() override;
// CdmPromiseTemplate<T> implementation.
- void resolve(const std::string& web_session_id) override;
+ void resolve(const std::string& session_id) override;
void reject(MediaKeys::Exception exception_code,
uint32 system_code,
const std::string& error_message) override;
diff --git a/chromium/media/blink/null_encrypted_media_player_support.cc b/chromium/media/blink/null_encrypted_media_player_support.cc
deleted file mode 100644
index 240e23e5b5c..00000000000
--- a/chromium/media/blink/null_encrypted_media_player_support.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/blink/null_encrypted_media_player_support.h"
-
-#include "base/bind.h"
-#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
-#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
-
-namespace media {
-
-static void NeedKeyHandler(const std::string& type,
- const std::vector<uint8>& init_data) {
- NOTIMPLEMENTED();
-}
-
-scoped_ptr<EncryptedMediaPlayerSupport>
-NullEncryptedMediaPlayerSupport::Create(blink::WebMediaPlayerClient* client) {
- return scoped_ptr<EncryptedMediaPlayerSupport>(
- new NullEncryptedMediaPlayerSupport());
-}
-
-NullEncryptedMediaPlayerSupport::NullEncryptedMediaPlayerSupport() {
-}
-
-NullEncryptedMediaPlayerSupport::~NullEncryptedMediaPlayerSupport() {
-}
-
-blink::WebMediaPlayer::MediaKeyException
-NullEncryptedMediaPlayerSupport::GenerateKeyRequest(
- blink::WebLocalFrame* frame,
- const blink::WebString& key_system,
- const unsigned char* init_data,
- unsigned init_data_length) {
- return blink::WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
-}
-
-blink::WebMediaPlayer::MediaKeyException
-NullEncryptedMediaPlayerSupport::AddKey(
- const blink::WebString& key_system,
- const unsigned char* key,
- unsigned key_length,
- const unsigned char* init_data,
- unsigned init_data_length,
- const blink::WebString& session_id) {
- return blink::WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
-}
-
-blink::WebMediaPlayer::MediaKeyException
-NullEncryptedMediaPlayerSupport::CancelKeyRequest(
- const blink::WebString& key_system,
- const blink::WebString& session_id) {
- return blink::WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
-}
-
-void NullEncryptedMediaPlayerSupport::SetInitialContentDecryptionModule(
- blink::WebContentDecryptionModule* initial_cdm) {
-}
-
-void NullEncryptedMediaPlayerSupport::SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm) {
-}
-
-void NullEncryptedMediaPlayerSupport::SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm,
- blink::WebContentDecryptionModuleResult result) {
- result.completeWithError(
- blink::WebContentDecryptionModuleExceptionNotSupportedError,
- 0,
- "Null MediaKeys object is not supported.");
-}
-
-Demuxer::NeedKeyCB NullEncryptedMediaPlayerSupport::CreateNeedKeyCB() {
- return base::Bind(&NeedKeyHandler);
-}
-
-SetDecryptorReadyCB
-NullEncryptedMediaPlayerSupport::CreateSetDecryptorReadyCB() {
- return SetDecryptorReadyCB();
-}
-
-void NullEncryptedMediaPlayerSupport::OnPipelineDecryptError() {
-}
-
-} // namespace media
diff --git a/chromium/media/blink/null_encrypted_media_player_support.h b/chromium/media/blink/null_encrypted_media_player_support.h
deleted file mode 100644
index 3b53ad24ab2..00000000000
--- a/chromium/media/blink/null_encrypted_media_player_support.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BLINK_NULL_ENCRYPTED_MEDIA_PLAYER_SUPPORT_H_
-#define MEDIA_BLINK_NULL_ENCRYPTED_MEDIA_PLAYER_SUPPORT_H_
-
-#include "media/base/media_export.h"
-#include "media/blink/encrypted_media_player_support.h"
-
-namespace media {
-
-// A "null" implementation of the EncryptedMediaPlayerSupport interface
-// that indicates all key systems are not supported. This makes sure that
-// any attempts to play encrypted content always fail.
-class MEDIA_EXPORT NullEncryptedMediaPlayerSupport
- : public EncryptedMediaPlayerSupport {
- public:
- static scoped_ptr<EncryptedMediaPlayerSupport> Create(
- blink::WebMediaPlayerClient* client);
-
- ~NullEncryptedMediaPlayerSupport() override;
-
- // Prefixed API methods.
- blink::WebMediaPlayer::MediaKeyException GenerateKeyRequest(
- blink::WebLocalFrame* frame,
- const blink::WebString& key_system,
- const unsigned char* init_data,
- unsigned init_data_length) override;
-
- blink::WebMediaPlayer::MediaKeyException AddKey(
- const blink::WebString& key_system,
- const unsigned char* key,
- unsigned key_length,
- const unsigned char* init_data,
- unsigned init_data_length,
- const blink::WebString& session_id) override;
-
- blink::WebMediaPlayer::MediaKeyException CancelKeyRequest(
- const blink::WebString& key_system,
- const blink::WebString& session_id) override;
-
- // Unprefixed API methods.
- void SetInitialContentDecryptionModule(
- blink::WebContentDecryptionModule* initial_cdm) override;
- void SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm) override;
- void SetContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm,
- blink::WebContentDecryptionModuleResult result) override;
-
- // Callback factory and notification methods used by WebMediaPlayerImpl.
-
- // Creates a callback that Demuxers can use to signal that the content
- // requires a key. This method makes sure the callback returned can be safely
- // invoked from any thread.
- Demuxer::NeedKeyCB CreateNeedKeyCB() override;
-
- // Creates a callback that renderers can use to set decryptor
- // ready callback. This method makes sure the callback returned can be safely
- // invoked from any thread.
- SetDecryptorReadyCB CreateSetDecryptorReadyCB() override;
-
- // Called to inform this object that the media pipeline encountered
- // and handled a decryption error.
- void OnPipelineDecryptError() override;
-
- private:
- NullEncryptedMediaPlayerSupport();
-
- DISALLOW_COPY_AND_ASSIGN(NullEncryptedMediaPlayerSupport);
-};
-
-} // namespace media
-
-#endif // MEDIA_BLINK_NULL_ENCRYPTED_MEDIA_PLAYER_SUPPORT_H_
diff --git a/chromium/media/blink/run_all_unittests.cc b/chromium/media/blink/run_all_unittests.cc
index 83c75d6c4ec..0261dc2602e 100644
--- a/chromium/media/blink/run_all_unittests.cc
+++ b/chromium/media/blink/run_all_unittests.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
#include "build/build_config.h"
@@ -15,6 +16,10 @@
#include "ui/gl/android/gl_jni_registrar.h"
#endif
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+#include "gin/v8_initializer.h"
+#endif
+
class TestBlinkPlatformSupport : NON_EXPORTED_BASE(public blink::Platform) {
public:
virtual ~TestBlinkPlatformSupport();
@@ -41,10 +46,10 @@ const unsigned char* TestBlinkPlatformSupport::getTraceCategoryEnabledFlag(
class BlinkMediaTestSuite : public base::TestSuite {
public:
BlinkMediaTestSuite(int argc, char** argv);
- virtual ~BlinkMediaTestSuite();
+ ~BlinkMediaTestSuite() override;
protected:
- virtual void Initialize() override;
+ void Initialize() override;
private:
scoped_ptr<TestBlinkPlatformSupport> blink_platform_support_;
@@ -73,6 +78,16 @@ void BlinkMediaTestSuite::Initialize() {
// present.
media::InitializeMediaLibraryForTesting();
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ gin::V8Initializer::LoadV8Snapshot();
+#endif
+
+ // Dummy task runner is initialized here because the blink::initialize creates
+ // IsolateHolder which needs the current task runner handle. There should be
+ // no task posted to this task runner.
+ scoped_ptr<base::MessageLoop> message_loop;
+ if (!base::MessageLoop::current())
+ message_loop.reset(new base::MessageLoop());
blink::initialize(blink_platform_support_.get());
}
diff --git a/chromium/media/filters/skcanvas_video_renderer.cc b/chromium/media/blink/skcanvas_video_renderer.cc
index 2243445d459..58aedf96ef2 100644
--- a/chromium/media/filters/skcanvas_video_renderer.cc
+++ b/chromium/media/blink/skcanvas_video_renderer.cc
@@ -2,15 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/skcanvas_video_renderer.h"
+#include "media/blink/skcanvas_video_renderer.h"
-#include "base/logging.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/video_frame.h"
#include "media/base/yuv_convert.h"
+#include "skia/ext/refptr.h"
#include "third_party/libyuv/include/libyuv.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkImageGenerator.h"
#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/gpu/GrTextureProvider.h"
+#include "third_party/skia/include/gpu/SkGrPixelRef.h"
#include "ui/gfx/skbitmap_operations.h"
// Skia internal format depends on a platform. On Android it is ABGR, on others
@@ -44,6 +49,7 @@ bool IsYUV(media::VideoFrame::Format format) {
case VideoFrame::I420:
case VideoFrame::YV12A:
case VideoFrame::YV12J:
+ case VideoFrame::YV12HD:
case VideoFrame::YV24:
case VideoFrame::NV12:
return true;
@@ -52,6 +58,7 @@ bool IsYUV(media::VideoFrame::Format format) {
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
return false;
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
@@ -63,6 +70,7 @@ bool IsJPEGColorSpace(media::VideoFrame::Format format) {
case VideoFrame::YV12J:
return true;
case VideoFrame::YV12:
+ case VideoFrame::YV12HD:
case VideoFrame::YV16:
case VideoFrame::I420:
case VideoFrame::YV12A:
@@ -73,6 +81,7 @@ bool IsJPEGColorSpace(media::VideoFrame::Format format) {
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
return false;
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
@@ -83,143 +92,91 @@ bool IsYUVOrNative(media::VideoFrame::Format format) {
return IsYUV(format) || format == media::VideoFrame::NATIVE_TEXTURE;
}
-// Converts a |video_frame| to raw |rgb_pixels|.
-void ConvertVideoFrameToRGBPixels(
- const scoped_refptr<media::VideoFrame>& video_frame,
- void* rgb_pixels,
- size_t row_bytes) {
- DCHECK(IsYUVOrNative(video_frame->format()))
- << video_frame->format();
- if (IsYUV(video_frame->format())) {
- DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->stride(media::VideoFrame::kVPlane));
- }
-
- size_t y_offset = 0;
- size_t uv_offset = 0;
- if (IsYUV(video_frame->format())) {
- int y_shift = (video_frame->format() == media::VideoFrame::YV16) ? 0 : 1;
- // Use the "left" and "top" of the destination rect to locate the offset
- // in Y, U and V planes.
- y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
- video_frame->visible_rect().y()) +
- video_frame->visible_rect().x();
- // For format YV12, there is one U, V value per 2x2 block.
- // For format YV16, there is one U, V value per 2x1 block.
- uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
- (video_frame->visible_rect().y() >> y_shift)) +
- (video_frame->visible_rect().x() >> 1);
- }
+bool IsSkBitmapProperlySizedTexture(const SkBitmap* bitmap,
+ const gfx::Size& size) {
+ return bitmap->getTexture() && bitmap->width() == size.width() &&
+ bitmap->height() == size.height();
+}
- switch (video_frame->format()) {
- case media::VideoFrame::YV12:
- case media::VideoFrame::I420:
- LIBYUV_I420_TO_ARGB(
- video_frame->data(media::VideoFrame::kYPlane) + y_offset,
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(rgb_pixels),
- row_bytes,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
- break;
+bool AllocateSkBitmapTexture(GrContext* gr,
+ SkBitmap* bitmap,
+ const gfx::Size& size) {
+ DCHECK(gr);
+ GrTextureDesc desc;
+ // Use kRGBA_8888_GrPixelConfig, not kSkia8888_GrPixelConfig, to avoid
+ // RGBA to BGRA conversion.
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ desc.fSampleCnt = 0;
+ desc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ desc.fWidth = size.width();
+ desc.fHeight = size.height();
+ skia::RefPtr<GrTexture> texture = skia::AdoptRef(
+ gr->textureProvider()->refScratchTexture(
+ desc, GrTextureProvider::kExact_ScratchTexMatch));
+ if (!texture.get())
+ return false;
+
+ SkImageInfo info = SkImageInfo::MakeN32Premul(desc.fWidth, desc.fHeight);
+ SkGrPixelRef* pixel_ref = SkNEW_ARGS(SkGrPixelRef, (info, texture.get()));
+ if (!pixel_ref)
+ return false;
+ bitmap->setInfo(info);
+ bitmap->setPixelRef(pixel_ref)->unref();
+ return true;
+}
- case media::VideoFrame::YV12J:
- media::ConvertYUVToRGB32(
- video_frame->data(media::VideoFrame::kYPlane) + y_offset,
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- static_cast<uint8*>(rgb_pixels),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- row_bytes,
- media::YV12J);
- break;
+bool CopyVideoFrameTextureToSkBitmapTexture(VideoFrame* video_frame,
+ SkBitmap* bitmap,
+ const Context3D& context_3d) {
+ // Check if we could reuse existing texture based bitmap.
+ // Otherwise, release existing texture based bitmap and allocate
+ // a new one based on video size.
+ if (!IsSkBitmapProperlySizedTexture(bitmap,
+ video_frame->visible_rect().size())) {
+ if (!AllocateSkBitmapTexture(context_3d.gr_context, bitmap,
+ video_frame->visible_rect().size())) {
+ return false;
+ }
+ }
- case media::VideoFrame::YV16:
- LIBYUV_I422_TO_ARGB(
- video_frame->data(media::VideoFrame::kYPlane) + y_offset,
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(rgb_pixels),
- row_bytes,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
- break;
+ unsigned texture_id =
+ static_cast<unsigned>((bitmap->getTexture())->getTextureHandle());
+ // If CopyVideoFrameTextureToGLTexture() changes the state of the
+ // |texture_id|, it's needed to invalidate the state cached in skia,
+ // but currently the state isn't changed.
+ SkCanvasVideoRenderer::CopyVideoFrameTextureToGLTexture(
+ context_3d.gl, video_frame, texture_id, GL_RGBA, GL_UNSIGNED_BYTE, true,
+ false);
+ bitmap->notifyPixelsChanged();
+ return true;
+}
- case media::VideoFrame::YV12A:
- // Since libyuv doesn't support YUVA, fallback to media, which is not ARM
- // optimized.
- // TODO(fbarchard, mtomasz): Use libyuv, then copy the alpha channel.
- media::ConvertYUVAToARGB(
- video_frame->data(media::VideoFrame::kYPlane) + y_offset,
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- video_frame->data(media::VideoFrame::kAPlane),
- static_cast<uint8*>(rgb_pixels),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->stride(media::VideoFrame::kAPlane),
- row_bytes,
- media::YV12);
- break;
+class SyncPointClientImpl : public VideoFrame::SyncPointClient {
+ public:
+ explicit SyncPointClientImpl(gpu::gles2::GLES2Interface* gl) : gl_(gl) {}
+ ~SyncPointClientImpl() override {}
+ uint32 InsertSyncPoint() override { return gl_->InsertSyncPointCHROMIUM(); }
+ void WaitSyncPoint(uint32 sync_point) override {
+ gl_->WaitSyncPointCHROMIUM(sync_point);
+ }
- case media::VideoFrame::YV24:
- libyuv::I444ToARGB(
- video_frame->data(media::VideoFrame::kYPlane) + y_offset,
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(rgb_pixels),
- row_bytes,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
-#if SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
- SK_A32_SHIFT == 24
- libyuv::ARGBToABGR(static_cast<uint8*>(rgb_pixels),
- row_bytes,
- static_cast<uint8*>(rgb_pixels),
- row_bytes,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
-#endif
- break;
+ private:
+ gpu::gles2::GLES2Interface* gl_;
- case media::VideoFrame::NATIVE_TEXTURE: {
- DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
- SkBitmap tmp;
- tmp.installPixels(
- SkImageInfo::MakeN32Premul(video_frame->visible_rect().width(),
- video_frame->visible_rect().height()),
- rgb_pixels,
- row_bytes);
- video_frame->ReadPixelsFromNativeTexture(tmp);
- break;
- }
- default:
- NOTREACHED();
- break;
- }
-}
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SyncPointClientImpl);
+};
} // anonymous namespace
// Generates an RGB image from a VideoFrame. Convert YUV to RGB plain on GPU.
class VideoImageGenerator : public SkImageGenerator {
public:
- VideoImageGenerator(const scoped_refptr<VideoFrame>& frame) : frame_(frame) {
+ VideoImageGenerator(const scoped_refptr<VideoFrame>& frame)
+ : SkImageGenerator(
+ SkImageInfo::MakeN32Premul(frame->visible_rect().width(),
+ frame->visible_rect().height()))
+ , frame_(frame) {
DCHECK(frame_.get());
}
~VideoImageGenerator() override {}
@@ -227,34 +184,31 @@ class VideoImageGenerator : public SkImageGenerator {
void set_frame(const scoped_refptr<VideoFrame>& frame) { frame_ = frame; }
protected:
- bool onGetInfo(SkImageInfo* info) override {
- info->fWidth = frame_->visible_rect().width();
- info->fHeight = frame_->visible_rect().height();
- info->fColorType = kN32_SkColorType;
- info->fAlphaType = kPremul_SkAlphaType;
- return true;
- }
-
- bool onGetPixels(const SkImageInfo& info,
- void* pixels,
- size_t row_bytes,
- SkPMColor ctable[],
- int* ctable_count) override {
+ Result onGetPixels(const SkImageInfo& info,
+ void* pixels,
+ size_t row_bytes,
+ SkPMColor ctable[],
+ int* ctable_count) override {
if (!frame_.get())
- return false;
- if (!pixels)
- return false;
+ return kInvalidInput;
// If skia couldn't do the YUV conversion on GPU, we will on CPU.
- ConvertVideoFrameToRGBPixels(frame_, pixels, row_bytes);
- return true;
+ SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
+ frame_, pixels, row_bytes);
+ return kSuccess;
}
bool onGetYUV8Planes(SkISize sizes[3],
void* planes[3],
size_t row_bytes[3],
SkYUVColorSpace* color_space) override {
- if (!frame_.get() || !IsYUV(frame_->format()))
+ if (!frame_.get() || !IsYUV(frame_->format()) ||
+ // TODO(rileya): Skia currently doesn't support Rec709 YUV conversion,
+ // or YUVA conversion. Remove this case once it does. As-is we will
+ // fall back on the pure-software path in this case.
+ frame_->format() == VideoFrame::YV12HD ||
+ frame_->format() == VideoFrame::YV12A) {
return false;
+ }
if (color_space) {
if (IsJPEGColorSpace(frame_->format()))
@@ -286,8 +240,27 @@ class VideoImageGenerator : public SkImageGenerator {
(frame_->visible_rect().y() >> y_shift)) +
(frame_->visible_rect().x() >> 1);
}
- row_bytes[plane] = static_cast<size_t>(frame_->stride(plane));
- planes[plane] = frame_->data(plane) + offset;
+
+ // Copy the frame to the supplied memory.
+ // TODO: Find a way (API change?) to avoid this copy.
+ char* out_line = static_cast<char*>(planes[plane]);
+ int out_line_stride = row_bytes[plane];
+ uint8* in_line = frame_->data(plane) + offset;
+ int in_line_stride = frame_->stride(plane);
+ int plane_height = sizes[plane].height();
+ if (in_line_stride == out_line_stride) {
+ memcpy(out_line, in_line, plane_height * in_line_stride);
+ } else {
+ // Different line padding so need to copy one line at a time.
+ int bytes_to_copy_per_line = out_line_stride < in_line_stride
+ ? out_line_stride
+ : in_line_stride;
+ for (int line_no = 0; line_no < plane_height; line_no++) {
+ memcpy(out_line, in_line, bytes_to_copy_per_line);
+ in_line += in_line_stride;
+ out_line += out_line_stride;
+ }
+ }
}
}
return true;
@@ -306,7 +279,7 @@ SkCanvasVideoRenderer::SkCanvasVideoRenderer()
base::TimeDelta::FromSeconds(kTemporaryResourceDeletionDelay),
this,
&SkCanvasVideoRenderer::ResetLastFrame),
- accelerated_generator_(NULL),
+ accelerated_generator_(nullptr),
accelerated_last_frame_timestamp_(media::kNoTimestamp()),
accelerated_frame_deleting_timer_(
FROM_HERE,
@@ -323,7 +296,8 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
const gfx::RectF& dest_rect,
uint8 alpha,
SkXfermode::Mode mode,
- VideoRotation video_rotation) {
+ VideoRotation video_rotation,
+ const Context3D& context_3d) {
if (alpha == 0) {
return;
}
@@ -343,16 +317,50 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
return;
}
- SkBitmap* target_frame = NULL;
- if (canvas->getGrContext()) {
+ SkBitmap* target_frame = nullptr;
+
+ if (video_frame->format() == VideoFrame::NATIVE_TEXTURE) {
+ // Draw HW Video on both SW and HW Canvas.
+ // In SW Canvas case, rely on skia drawing Ganesh SkBitmap on SW SkCanvas.
+ if (accelerated_last_frame_.isNull() ||
+ video_frame->timestamp() != accelerated_last_frame_timestamp_) {
+ DCHECK(context_3d.gl);
+ DCHECK(context_3d.gr_context);
+ if (accelerated_generator_) {
+ // Reset SkBitmap used in SWVideo-to-HWCanvas path.
+ accelerated_last_frame_.reset();
+ accelerated_generator_ = nullptr;
+ }
+ if (!CopyVideoFrameTextureToSkBitmapTexture(
+ video_frame.get(), &accelerated_last_frame_, context_3d)) {
+ NOTREACHED();
+ return;
+ }
+ DCHECK(video_frame->visible_rect().width() ==
+ accelerated_last_frame_.width() &&
+ video_frame->visible_rect().height() ==
+ accelerated_last_frame_.height());
+
+ accelerated_last_frame_timestamp_ = video_frame->timestamp();
+ }
+ target_frame = &accelerated_last_frame_;
+ accelerated_frame_deleting_timer_.Reset();
+ } else if (canvas->getGrContext()) {
+ DCHECK(video_frame->format() != VideoFrame::NATIVE_TEXTURE);
if (accelerated_last_frame_.isNull() ||
video_frame->timestamp() != accelerated_last_frame_timestamp_) {
+ // Draw SW Video on HW Canvas.
+ if (!accelerated_generator_ && !accelerated_last_frame_.isNull()) {
+ // Reset SkBitmap used in HWVideo-to-HWCanvas path.
+ accelerated_last_frame_.reset();
+ }
accelerated_generator_ = new VideoImageGenerator(video_frame);
// Note: This takes ownership of |accelerated_generator_|.
if (!SkInstallDiscardablePixelRef(accelerated_generator_,
&accelerated_last_frame_)) {
NOTREACHED();
+ return;
}
DCHECK(video_frame->visible_rect().width() ==
accelerated_last_frame_.width() &&
@@ -360,13 +368,14 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
accelerated_last_frame_.height());
accelerated_last_frame_timestamp_ = video_frame->timestamp();
- } else {
+ } else if (accelerated_generator_) {
accelerated_generator_->set_frame(video_frame);
}
target_frame = &accelerated_last_frame_;
accelerated_frame_deleting_timer_.Reset();
} else {
- // Check if we should convert and update |last_frame_|.
+ // Draw SW Video on SW Canvas.
+ DCHECK(video_frame->format() != VideoFrame::NATIVE_TEXTURE);
if (last_frame_.isNull() ||
video_frame->timestamp() != last_frame_timestamp_) {
// Check if |bitmap| needs to be (re)allocated.
@@ -389,7 +398,7 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
}
paint.setXfermodeMode(mode);
- paint.setFilterLevel(SkPaint::kLow_FilterLevel);
+ paint.setFilterQuality(kLow_SkFilterQuality);
bool need_transform =
video_rotation != VIDEO_ROTATION_0 ||
@@ -434,18 +443,196 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
canvas->flush();
// SkCanvas::flush() causes the generator to generate SkImage, so delete
// |video_frame| not to be outlived.
- if (canvas->getGrContext())
- accelerated_generator_->set_frame(NULL);
+ if (canvas->getGrContext() && accelerated_generator_)
+ accelerated_generator_->set_frame(nullptr);
}
void SkCanvasVideoRenderer::Copy(const scoped_refptr<VideoFrame>& video_frame,
- SkCanvas* canvas) {
- Paint(video_frame,
- canvas,
- video_frame->visible_rect(),
- 0xff,
- SkXfermode::kSrc_Mode,
- media::VIDEO_ROTATION_0);
+ SkCanvas* canvas,
+ const Context3D& context_3d) {
+ Paint(video_frame, canvas, video_frame->visible_rect(), 0xff,
+ SkXfermode::kSrc_Mode, media::VIDEO_ROTATION_0, context_3d);
+}
+
+// static
+void SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ void* rgb_pixels,
+ size_t row_bytes) {
+ DCHECK(IsYUVOrNative(video_frame->format()))
+ << video_frame->format();
+ if (IsYUV(video_frame->format())) {
+ DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
+ video_frame->stride(media::VideoFrame::kVPlane));
+ }
+
+ size_t y_offset = 0;
+ size_t uv_offset = 0;
+ if (IsYUV(video_frame->format())) {
+ int y_shift = (video_frame->format() == media::VideoFrame::YV16) ? 0 : 1;
+ // Use the "left" and "top" of the destination rect to locate the offset
+ // in Y, U and V planes.
+ y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
+ video_frame->visible_rect().y()) +
+ video_frame->visible_rect().x();
+ // For format YV12, there is one U, V value per 2x2 block.
+ // For format YV16, there is one U, V value per 2x1 block.
+ uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
+ (video_frame->visible_rect().y() >> y_shift)) +
+ (video_frame->visible_rect().x() >> 1);
+ }
+
+ switch (video_frame->format()) {
+ case VideoFrame::YV12:
+ case VideoFrame::I420:
+ LIBYUV_I420_TO_ARGB(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kVPlane),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+ break;
+
+ case VideoFrame::YV12J:
+ ConvertYUVToRGB32(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ static_cast<uint8*>(rgb_pixels),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->stride(VideoFrame::kUPlane),
+ row_bytes,
+ YV12J);
+ break;
+
+ case VideoFrame::YV12HD:
+ ConvertYUVToRGB32(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ static_cast<uint8*>(rgb_pixels),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->stride(VideoFrame::kUPlane),
+ row_bytes,
+ YV12HD);
+ break;
+
+ case VideoFrame::YV16:
+ LIBYUV_I422_TO_ARGB(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kVPlane),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+ break;
+
+ case VideoFrame::YV12A:
+ // Since libyuv doesn't support YUVA, fallback to media, which is not ARM
+ // optimized.
+ // TODO(fbarchard, mtomasz): Use libyuv, then copy the alpha channel.
+ ConvertYUVAToARGB(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->data(VideoFrame::kAPlane),
+ static_cast<uint8*>(rgb_pixels),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->stride(VideoFrame::kAPlane),
+ row_bytes,
+ YV12);
+ break;
+
+ case VideoFrame::YV24:
+ libyuv::I444ToARGB(
+ video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(VideoFrame::kVPlane),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+#if SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
+ SK_A32_SHIFT == 24
+ libyuv::ARGBToABGR(static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+#endif
+ break;
+
+ case VideoFrame::NATIVE_TEXTURE:
+ NOTREACHED();
+ break;
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NV12:
+ NOTREACHED();
+ }
+}
+
+// static
+void SkCanvasVideoRenderer::CopyVideoFrameTextureToGLTexture(
+ gpu::gles2::GLES2Interface* gl,
+ VideoFrame* video_frame,
+ unsigned int texture,
+ unsigned int internal_format,
+ unsigned int type,
+ bool premultiply_alpha,
+ bool flip_y) {
+ DCHECK(video_frame && video_frame->format() == VideoFrame::NATIVE_TEXTURE);
+ DCHECK_EQ(1u, VideoFrame::NumTextures(video_frame->texture_format()));
+ const gpu::MailboxHolder& mailbox_holder = video_frame->mailbox_holder(0);
+ DCHECK(mailbox_holder.texture_target == GL_TEXTURE_2D ||
+ mailbox_holder.texture_target == GL_TEXTURE_RECTANGLE_ARB ||
+ mailbox_holder.texture_target == GL_TEXTURE_EXTERNAL_OES);
+
+ gl->WaitSyncPointCHROMIUM(mailbox_holder.sync_point);
+ uint32 source_texture = gl->CreateAndConsumeTextureCHROMIUM(
+ mailbox_holder.texture_target, mailbox_holder.mailbox.name);
+
+ // The video is stored in a unmultiplied format, so premultiply
+ // if necessary.
+ gl->PixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM, premultiply_alpha);
+ // Application itself needs to take care of setting the right |flip_y|
+ // value down to get the expected result.
+ // "flip_y == true" means to reverse the video orientation while
+ // "flip_y == false" means to keep the intrinsic orientation.
+ gl->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ gl->CopyTextureCHROMIUM(GL_TEXTURE_2D, source_texture, texture,
+ internal_format, type);
+ gl->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, false);
+ gl->PixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM, false);
+
+ gl->DeleteTextures(1, &source_texture);
+ gl->Flush();
+
+ SyncPointClientImpl client(gl);
+ video_frame->UpdateReleaseSyncPoint(&client);
}
void SkCanvasVideoRenderer::ResetLastFrame() {
diff --git a/chromium/media/filters/skcanvas_video_renderer.h b/chromium/media/blink/skcanvas_video_renderer.h
index 1e81d316f56..9b75cd01e0f 100644
--- a/chromium/media/filters/skcanvas_video_renderer.h
+++ b/chromium/media/blink/skcanvas_video_renderer.h
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_SKCANVAS_VIDEO_RENDERER_H_
-#define MEDIA_FILTERS_SKCANVAS_VIDEO_RENDERER_H_
+#ifndef MEDIA_BLINK_SKCANVAS_VIDEO_RENDERER_H_
+#define MEDIA_BLINK_SKCANVAS_VIDEO_RENDERER_H_
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "media/base/media_export.h"
#include "media/base/video_rotation.h"
+#include "media/filters/context_3d.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkXfermode.h"
-#include "ui/gfx/rect.h"
+#include "ui/gfx/geometry/rect.h"
class SkCanvas;
@@ -30,6 +31,8 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
// Paints |video_frame| on |canvas|, scaling and rotating the result to fit
// dimensions specified by |dest_rect|.
+ // If the format of |video_frame| is VideoFrame::NATIVE_TEXTURE, |context_3d|
+ // must be provided.
//
// Black will be painted on |canvas| if |video_frame| is null.
void Paint(const scoped_refptr<VideoFrame>& video_frame,
@@ -37,10 +40,34 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
const gfx::RectF& dest_rect,
uint8 alpha,
SkXfermode::Mode mode,
- VideoRotation video_rotation);
+ VideoRotation video_rotation,
+ const Context3D& context_3d);
// Copy |video_frame| on |canvas|.
- void Copy(const scoped_refptr<VideoFrame>&, SkCanvas* canvas);
+ // If the format of |video_frame| is VideoFrame::NATIVE_TEXTURE, |context_3d|
+ // must be provided.
+ void Copy(const scoped_refptr<VideoFrame>& video_frame,
+ SkCanvas* canvas,
+ const Context3D& context_3d);
+
+ // Convert the contents of |video_frame| to raw RGB pixels. |rgb_pixels|
+ // should point into a buffer large enough to hold as many 32 bit RGBA pixels
+ // as are in the visible_rect() area of the frame.
+ static void ConvertVideoFrameToRGBPixels(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ void* rgb_pixels,
+ size_t row_bytes);
+
+ // Copy the contents of texture of |video_frame| to texture |texture|.
+ // |level|, |internal_format|, |type| specify target texture |texture|.
+ // The format of |video_frame| must be VideoFrame::NATIVE_TEXTURE.
+ static void CopyVideoFrameTextureToGLTexture(gpu::gles2::GLES2Interface* gl,
+ VideoFrame* video_frame,
+ unsigned int texture,
+ unsigned int internal_format,
+ unsigned int type,
+ bool premultiply_alpha,
+ bool flip_y);
private:
void ResetLastFrame();
@@ -67,4 +94,4 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
} // namespace media
-#endif // MEDIA_FILTERS_SKCANVAS_VIDEO_RENDERER_H_
+#endif // MEDIA_BLINK_SKCANVAS_VIDEO_RENDERER_H_
diff --git a/chromium/media/filters/skcanvas_video_renderer_unittest.cc b/chromium/media/blink/skcanvas_video_renderer_unittest.cc
index 8e1ba59701f..3062d77164c 100644
--- a/chromium/media/filters/skcanvas_video_renderer_unittest.cc
+++ b/chromium/media/blink/skcanvas_video_renderer_unittest.cc
@@ -5,9 +5,9 @@
#include "base/message_loop/message_loop.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
+#include "media/blink/skcanvas_video_renderer.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkCanvas.h"
-#include "media/filters/skcanvas_video_renderer.h"
using media::VideoFrame;
@@ -197,12 +197,8 @@ SkCanvasVideoRendererTest::SkCanvasVideoRendererTest()
SkCanvasVideoRendererTest::~SkCanvasVideoRendererTest() {}
void SkCanvasVideoRendererTest::PaintWithoutFrame(SkCanvas* canvas) {
- renderer_.Paint(NULL,
- canvas,
- kNaturalRect,
- 0xFF,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_0);
+ renderer_.Paint(nullptr, canvas, kNaturalRect, 0xFF,
+ SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_0, Context3D());
}
void SkCanvasVideoRendererTest::Paint(
@@ -237,13 +233,14 @@ void SkCanvasVideoRendererTest::PaintRotated(
media::FillYUV(video_frame.get(), 29, 255, 107);
break;
}
- renderer_.Paint(video_frame, canvas, dest_rect, 0xFF, mode, video_rotation);
+ renderer_.Paint(video_frame, canvas, dest_rect, 0xFF, mode, video_rotation,
+ Context3D());
}
void SkCanvasVideoRendererTest::Copy(
const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas) {
- renderer_.Copy(video_frame, canvas);
+ renderer_.Copy(video_frame, canvas, Context3D());
}
TEST_F(SkCanvasVideoRendererTest, NoFrame) {
diff --git a/chromium/media/blink/test_response_generator.cc b/chromium/media/blink/test_response_generator.cc
index aa3b74890ce..de7b5d5bce6 100644
--- a/chromium/media/blink/test_response_generator.cc
+++ b/chromium/media/blink/test_response_generator.cc
@@ -9,7 +9,6 @@
#include "base/strings/stringprintf.h"
#include "net/base/net_errors.h"
#include "third_party/WebKit/public/platform/WebString.h"
-#include "third_party/WebKit/public/platform/WebURLResponse.h"
using blink::WebString;
using blink::WebURLError;
diff --git a/chromium/media/blink/video_frame_compositor.cc b/chromium/media/blink/video_frame_compositor.cc
index 7f254d4bc84..f38a9ba18fc 100644
--- a/chromium/media/blink/video_frame_compositor.cc
+++ b/chromium/media/blink/video_frame_compositor.cc
@@ -4,15 +4,24 @@
#include "media/blink/video_frame_compositor.h"
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/default_tick_clock.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/video_frame.h"
namespace media {
+// Amount of time to wait between UpdateCurrentFrame() callbacks before starting
+// background rendering to keep the Render() callbacks moving.
+const int kBackgroundRenderingTimeoutMs = 250;
+
static bool IsOpaque(const scoped_refptr<VideoFrame>& frame) {
switch (frame->format()) {
case VideoFrame::UNKNOWN:
case VideoFrame::YV12:
case VideoFrame::YV12J:
+ case VideoFrame::YV12HD:
case VideoFrame::YV16:
case VideoFrame::I420:
case VideoFrame::YV24:
@@ -24,54 +33,229 @@ static bool IsOpaque(const scoped_refptr<VideoFrame>& frame) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
case VideoFrame::NATIVE_TEXTURE:
+ case VideoFrame::ARGB:
break;
}
return false;
}
VideoFrameCompositor::VideoFrameCompositor(
+ const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
const base::Callback<void(gfx::Size)>& natural_size_changed_cb,
const base::Callback<void(bool)>& opacity_changed_cb)
- : natural_size_changed_cb_(natural_size_changed_cb),
+ : compositor_task_runner_(compositor_task_runner),
+ tick_clock_(new base::DefaultTickClock()),
+ natural_size_changed_cb_(natural_size_changed_cb),
opacity_changed_cb_(opacity_changed_cb),
- client_(NULL) {
+ background_rendering_enabled_(true),
+ background_rendering_timer_(
+ FROM_HERE,
+ base::TimeDelta::FromMilliseconds(kBackgroundRenderingTimeoutMs),
+ base::Bind(&VideoFrameCompositor::BackgroundRender,
+ base::Unretained(this)),
+ // Task is not repeating, CallRender() will reset the task as needed.
+ false),
+ client_(nullptr),
+ rendering_(false),
+ rendered_last_frame_(false),
+ is_background_rendering_(false),
+ // Assume 60Hz before the first UpdateCurrentFrame() call.
+ last_interval_(base::TimeDelta::FromSecondsD(1.0 / 60)),
+ callback_(nullptr) {
+ background_rendering_timer_.SetTaskRunner(compositor_task_runner_);
}
VideoFrameCompositor::~VideoFrameCompositor() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ DCHECK(!callback_);
+ DCHECK(!rendering_);
if (client_)
client_->StopUsingProvider();
}
+void VideoFrameCompositor::OnRendererStateUpdate(bool new_state) {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(rendering_, new_state);
+ rendering_ = new_state;
+
+ if (rendering_) {
+ // Always start playback in background rendering mode, if |client_| kicks
+ // in right away it's okay.
+ BackgroundRender();
+ } else if (background_rendering_enabled_) {
+ background_rendering_timer_.Stop();
+ } else {
+ DCHECK(!background_rendering_timer_.IsRunning());
+ }
+
+ if (!client_)
+ return;
+
+ if (rendering_)
+ client_->StartRendering();
+ else
+ client_->StopRendering();
+}
+
void VideoFrameCompositor::SetVideoFrameProviderClient(
cc::VideoFrameProvider::Client* client) {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
if (client_)
client_->StopUsingProvider();
client_ = client;
+
+ // |client_| may now be null, so verify before calling it.
+ if (rendering_ && client_)
+ client_->StartRendering();
}
scoped_refptr<VideoFrame> VideoFrameCompositor::GetCurrentFrame() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
return current_frame_;
}
-void VideoFrameCompositor::PutCurrentFrame(
+void VideoFrameCompositor::PutCurrentFrame() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ rendered_last_frame_ = true;
+}
+
+bool VideoFrameCompositor::UpdateCurrentFrame(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max) {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ return CallRender(deadline_min, deadline_max, false);
+}
+
+bool VideoFrameCompositor::HasCurrentFrame() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ return current_frame_;
+}
+
+void VideoFrameCompositor::Start(RenderCallback* callback) {
+ TRACE_EVENT0("media", "VideoFrameCompositor::Start");
+
+ // Called from the media thread, so acquire the callback under lock before
+ // returning in case a Stop() call comes in before the PostTask is processed.
+ base::AutoLock lock(lock_);
+ DCHECK(!callback_);
+ callback_ = callback;
+ compositor_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VideoFrameCompositor::OnRendererStateUpdate,
+ base::Unretained(this), true));
+}
+
+void VideoFrameCompositor::Stop() {
+ TRACE_EVENT0("media", "VideoFrameCompositor::Stop");
+
+ // Called from the media thread, so release the callback under lock before
+ // returning to avoid a pending UpdateCurrentFrame() call occurring before
+ // the PostTask is processed.
+ base::AutoLock lock(lock_);
+ DCHECK(callback_);
+ callback_ = nullptr;
+ compositor_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VideoFrameCompositor::OnRendererStateUpdate,
+ base::Unretained(this), false));
+}
+
+void VideoFrameCompositor::PaintFrameUsingOldRenderingPath(
const scoped_refptr<VideoFrame>& frame) {
+ if (!compositor_task_runner_->BelongsToCurrentThread()) {
+ compositor_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoFrameCompositor::PaintFrameUsingOldRenderingPath,
+ base::Unretained(this), frame));
+ return;
+ }
+
+ if (ProcessNewFrame(frame) && client_)
+ client_->DidReceiveFrame();
}
-void VideoFrameCompositor::UpdateCurrentFrame(
+scoped_refptr<VideoFrame>
+VideoFrameCompositor::GetCurrentFrameAndUpdateIfStale() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ if (client_ || !rendering_ || !is_background_rendering_)
+ return current_frame_;
+
+ DCHECK(!last_background_render_.is_null());
+
+ const base::TimeTicks now = tick_clock_->NowTicks();
+ const base::TimeDelta interval = now - last_background_render_;
+
+ // Cap updates to 250Hz which should be more than enough for everyone.
+ if (interval < base::TimeDelta::FromMilliseconds(4))
+ return current_frame_;
+
+ // Update the interval based on the time between calls and call background
+ // render which will give this information to the client.
+ last_interval_ = interval;
+ BackgroundRender();
+
+ return current_frame_;
+}
+
+bool VideoFrameCompositor::ProcessNewFrame(
const scoped_refptr<VideoFrame>& frame) {
- if (current_frame_.get() &&
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+
+ if (frame == current_frame_)
+ return false;
+
+ // Set the flag indicating that the current frame is unrendered, if we get a
+ // subsequent PutCurrentFrame() call it will mark it as rendered.
+ rendered_last_frame_ = false;
+
+ if (current_frame_ &&
current_frame_->natural_size() != frame->natural_size()) {
natural_size_changed_cb_.Run(frame->natural_size());
}
- if (!current_frame_.get() || IsOpaque(current_frame_) != IsOpaque(frame)) {
+ if (!current_frame_ || IsOpaque(current_frame_) != IsOpaque(frame))
opacity_changed_cb_.Run(IsOpaque(frame));
- }
current_frame_ = frame;
+ return true;
+}
- if (client_)
- client_->DidReceiveFrame();
+void VideoFrameCompositor::BackgroundRender() {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+ const base::TimeTicks now = tick_clock_->NowTicks();
+ last_background_render_ = now;
+ CallRender(now, now + last_interval_, true);
+}
+
+bool VideoFrameCompositor::CallRender(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ bool background_rendering) {
+ DCHECK(compositor_task_runner_->BelongsToCurrentThread());
+
+ base::AutoLock lock(lock_);
+ if (!callback_) {
+ // Even if we no longer have a callback, return true if we have a frame
+ // which |client_| hasn't seen before.
+ return !rendered_last_frame_ && current_frame_;
+ }
+
+ DCHECK(rendering_);
+
+ // If the previous frame was never rendered and we're not in background
+ // rendering mode (nor have just exited it), let the client know.
+ if (!rendered_last_frame_ && current_frame_ && !background_rendering &&
+ !is_background_rendering_) {
+ callback_->OnFrameDropped();
+ }
+
+ const bool new_frame = ProcessNewFrame(
+ callback_->Render(deadline_min, deadline_max, background_rendering));
+
+ is_background_rendering_ = background_rendering;
+ last_interval_ = deadline_max - deadline_min;
+
+ // Restart the background rendering timer whether we're background rendering
+ // or not; in either case we should wait for |kBackgroundRenderingTimeoutMs|.
+ if (background_rendering_enabled_)
+ background_rendering_timer_.Reset();
+ return new_frame;
}
} // namespace media
diff --git a/chromium/media/blink/video_frame_compositor.h b/chromium/media/blink/video_frame_compositor.h
index f8bdd1e63fc..c775b0ca2ad 100644
--- a/chromium/media/blink/video_frame_compositor.h
+++ b/chromium/media/blink/video_frame_compositor.h
@@ -7,24 +7,50 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/time/tick_clock.h"
+#include "base/timer/timer.h"
#include "cc/layers/video_frame_provider.h"
#include "media/base/media_export.h"
-#include "ui/gfx/size.h"
+#include "media/base/video_renderer_sink.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
class VideoFrame;
-// VideoFrameCompositor handles incoming frames by notifying the compositor and
-// dispatching callbacks when detecting changes in video frames.
+// VideoFrameCompositor acts as a bridge between the media and cc layers for
+// rendering video frames. I.e. a media::VideoRenderer will talk to this class
+// from the media side, while a cc::VideoFrameProvider::Client will talk to it
+// from the cc side.
//
-// Typical usage is to deliver ready-to-be-displayed video frames to
-// UpdateCurrentFrame() so that VideoFrameCompositor can take care of tracking
-// changes in video frames and firing callbacks as needed.
+// This class is responsible for requesting new frames from a video renderer in
+// response to requests from the VFP::Client. Since the VFP::Client may stop
+// issuing requests in response to visibility changes it is also responsible for
+// ensuring the "freshness" of the current frame for programmatic frame
+// requests; e.g., Canvas.drawImage() requests
//
-// VideoFrameCompositor must live on the same thread as the compositor.
+// This class is also responsible for detecting frames dropped by the compositor
+// after rendering and signaling that information to a RenderCallback. It
+// detects frames not dropped by verifying each GetCurrentFrame() is followed
+// by a PutCurrentFrame() before the next UpdateCurrentFrame() call.
+//
+// VideoRenderSink::RenderCallback implementations must call Start() and Stop()
+// once new frames are expected or are no longer expected to be ready; this data
+// is relayed to the compositor to avoid extraneous callbacks.
+//
+// VideoFrameCompositor is also responsible for pumping UpdateCurrentFrame()
+// callbacks in the background when |client_| has decided to suspend them.
+//
+// VideoFrameCompositor must live on the same thread as the compositor, though
+// it may be constructed on any thread.
class MEDIA_EXPORT VideoFrameCompositor
- : NON_EXPORTED_BASE(public cc::VideoFrameProvider) {
+ : public VideoRendererSink,
+ NON_EXPORTED_BASE(public cc::VideoFrameProvider) {
public:
+ // |compositor_task_runner| is the task runner on which this class will live,
+ // though it may be constructed on any thread.
+ //
// |natural_size_changed_cb| is run with the new natural size of the video
// frame whenever a change in natural size is detected. It is not called the
// first time UpdateCurrentFrame() is called. Run on the same thread as the
@@ -34,30 +60,111 @@ class MEDIA_EXPORT VideoFrameCompositor
// called the first time UpdateCurrentFrame() is called. Run on the same
// thread as the caller of UpdateCurrentFrame().
//
- // TODO(scherkus): Investigate the inconsistency between the callbacks with
+ // TODO(dalecurtis): Investigate the inconsistency between the callbacks with
// respect to why we don't call |natural_size_changed_cb| on the first frame.
// I suspect it was for historical reasons that no longer make sense.
VideoFrameCompositor(
+ const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
const base::Callback<void(gfx::Size)>& natural_size_changed_cb,
const base::Callback<void(bool)>& opacity_changed_cb);
+
+ // Destruction must happen on the compositor thread; Stop() must have been
+ // called before destruction starts.
~VideoFrameCompositor() override;
- // cc::VideoFrameProvider implementation.
+ // cc::VideoFrameProvider implementation. These methods must be called on the
+ // |compositor_task_runner_|.
void SetVideoFrameProviderClient(
cc::VideoFrameProvider::Client* client) override;
+ bool UpdateCurrentFrame(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max) override;
+ bool HasCurrentFrame() override;
scoped_refptr<VideoFrame> GetCurrentFrame() override;
- void PutCurrentFrame(const scoped_refptr<VideoFrame>& frame) override;
+ void PutCurrentFrame() override;
+
+ // VideoRendererSink implementation. These methods must be called from the
+ // same thread (typically the media thread).
+ void Start(RenderCallback* callback) override;
+ void Stop() override;
+ void PaintFrameUsingOldRenderingPath(
+ const scoped_refptr<VideoFrame>& frame) override;
+
+ // Returns |current_frame_| if |client_| is set. If no |client_| is set,
+ // |is_background_rendering_| is true, and |callback_| is set, it requests a
+ // new frame from |callback_|, using the elapsed time between calls to this
+ // function as the render interval; defaulting to 16.6ms if no prior calls
+ // have been made. A cap of 250Hz (4ms) is in place to prevent clients from
+ // accidentally (or intentionally) spamming the rendering pipeline.
+ //
+ // This method is primarily to facilitate canvas and WebGL based applications
+ // where the <video> tag is invisible (possibly not even in the DOM) and thus
+ // does not receive a |client_|. In this case, frame acquisition is driven by
+ // the frequency of canvas or WebGL paints requested via JavaScript.
+ scoped_refptr<VideoFrame> GetCurrentFrameAndUpdateIfStale();
+
+ void set_tick_clock_for_testing(scoped_ptr<base::TickClock> tick_clock) {
+ tick_clock_ = tick_clock.Pass();
+ }
+
+ void clear_current_frame_for_testing() { current_frame_ = nullptr; }
- // Updates the current frame and notifies the compositor.
- void UpdateCurrentFrame(const scoped_refptr<VideoFrame>& frame);
+ // Enables or disables background rendering. If |enabled|, |timeout| is the
+ // amount of time to wait after the last Render() call before starting the
+ // background rendering mode. Note, this can not disable the background
+ // rendering call issues when a sink is started.
+ void set_background_rendering_for_testing(bool enabled) {
+ background_rendering_enabled_ = enabled;
+ }
private:
- base::Callback<void(gfx::Size)> natural_size_changed_cb_;
- base::Callback<void(bool)> opacity_changed_cb_;
+ // Called on the compositor thread in response to Start() or Stop() calls;
+ // must be used to change |rendering_| state.
+ void OnRendererStateUpdate(bool new_state);
- cc::VideoFrameProvider::Client* client_;
+ // Handles setting of |current_frame_| and fires |natural_size_changed_cb_|
+ // and |opacity_changed_cb_| when the frame properties changes.
+ bool ProcessNewFrame(const scoped_refptr<VideoFrame>& frame);
+
+ // Called by |background_rendering_timer_| when enough time elapses where we
+ // haven't seen a Render() call.
+ void BackgroundRender();
+
+ // If |callback_| is available, calls Render() with the provided properties.
+ // Updates |is_background_rendering_|, |last_interval_|, and resets
+ // |background_rendering_timer_|. Ensures that natural size and opacity
+ // changes are correctly fired. Returns true if there's a new frame available
+ // via GetCurrentFrame().
+ bool CallRender(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ bool background_rendering);
+
+ scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner_;
+ scoped_ptr<base::TickClock> tick_clock_;
+
+ // These callbacks are executed on the compositor thread.
+ const base::Callback<void(gfx::Size)> natural_size_changed_cb_;
+ const base::Callback<void(bool)> opacity_changed_cb_;
+ // Allows tests to disable the background rendering task.
+ bool background_rendering_enabled_;
+
+ // Manages UpdateCurrentFrame() callbacks if |client_| has stopped sending
+ // them for various reasons. Runs on |compositor_task_runner_| and is reset
+ // after each successful UpdateCurrentFrame() call.
+ base::Timer background_rendering_timer_;
+
+ // These values are only set and read on the compositor thread.
+ cc::VideoFrameProvider::Client* client_;
scoped_refptr<VideoFrame> current_frame_;
+ bool rendering_;
+ bool rendered_last_frame_;
+ bool is_background_rendering_;
+ base::TimeDelta last_interval_;
+ base::TimeTicks last_background_render_;
+
+ // These values are updated and read from the media and compositor threads.
+ base::Lock lock_;
+ VideoRendererSink::RenderCallback* callback_;
DISALLOW_COPY_AND_ASSIGN(VideoFrameCompositor);
};
diff --git a/chromium/media/blink/video_frame_compositor_unittest.cc b/chromium/media/blink/video_frame_compositor_unittest.cc
index 4bceb242217..9394edf79fc 100644
--- a/chromium/media/blink/video_frame_compositor_unittest.cc
+++ b/chromium/media/blink/video_frame_compositor_unittest.cc
@@ -3,18 +3,33 @@
// found in the LICENSE file.
#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
#include "cc/layers/video_frame_provider.h"
#include "media/base/video_frame.h"
#include "media/blink/video_frame_compositor.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+
namespace media {
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
class VideoFrameCompositorTest : public testing::Test,
- public cc::VideoFrameProvider::Client {
+ public cc::VideoFrameProvider::Client,
+ public VideoRendererSink::RenderCallback {
public:
VideoFrameCompositorTest()
- : compositor_(new VideoFrameCompositor(
+ : tick_clock_(new base::SimpleTestTickClock()),
+ compositor_(new VideoFrameCompositor(
+ message_loop.task_runner(),
base::Bind(&VideoFrameCompositorTest::NaturalSizeChanged,
base::Unretained(this)),
base::Bind(&VideoFrameCompositorTest::OpacityChanged,
@@ -24,10 +39,20 @@ class VideoFrameCompositorTest : public testing::Test,
opacity_changed_count_(0),
opaque_(false) {
compositor_->SetVideoFrameProviderClient(this);
+ compositor_->set_tick_clock_for_testing(
+ scoped_ptr<base::TickClock>(tick_clock_));
+ // Disable background rendering by default.
+ compositor_->set_background_rendering_for_testing(false);
+ }
+
+ ~VideoFrameCompositorTest() override {
+ compositor_->SetVideoFrameProviderClient(nullptr);
}
- virtual ~VideoFrameCompositorTest() {
- compositor_->SetVideoFrameProviderClient(NULL);
+ scoped_refptr<VideoFrame> CreateOpaqueFrame() {
+ gfx::Size size(8, 8);
+ return VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size),
+ size, base::TimeDelta());
}
VideoFrameCompositor* compositor() { return compositor_.get(); }
@@ -38,13 +63,20 @@ class VideoFrameCompositorTest : public testing::Test,
int opacity_changed_count() { return opacity_changed_count_; }
bool opaque() { return opaque_; }
- private:
+ protected:
// cc::VideoFrameProvider::Client implementation.
- virtual void StopUsingProvider() override {}
- virtual void DidReceiveFrame() override {
- ++did_receive_frame_count_;
- }
- virtual void DidUpdateMatrix(const float* matrix) override {}
+ void StopUsingProvider() override {}
+ MOCK_METHOD0(StartRendering, void());
+ MOCK_METHOD0(StopRendering, void());
+ void DidReceiveFrame() override { ++did_receive_frame_count_; }
+ void DidUpdateMatrix(const float* matrix) override {}
+
+ // VideoRendererSink::RenderCallback implementation.
+ MOCK_METHOD3(Render,
+ scoped_refptr<VideoFrame>(base::TimeTicks,
+ base::TimeTicks,
+ bool));
+ MOCK_METHOD0(OnFrameDropped, void());
void NaturalSizeChanged(gfx::Size natural_size) {
++natural_size_changed_count_;
@@ -56,7 +88,34 @@ class VideoFrameCompositorTest : public testing::Test,
opaque_ = opaque;
}
+ void StartVideoRendererSink() {
+ EXPECT_CALL(*this, StartRendering());
+ const bool had_current_frame = !!compositor_->GetCurrentFrame();
+ compositor()->Start(this);
+ // If we previously had a frame, we should still have one now.
+ EXPECT_EQ(had_current_frame, !!compositor_->GetCurrentFrame());
+ message_loop.RunUntilIdle();
+ }
+
+ void StopVideoRendererSink(bool have_client) {
+ if (have_client)
+ EXPECT_CALL(*this, StopRendering());
+ const bool had_current_frame = !!compositor_->GetCurrentFrame();
+ compositor()->Stop();
+ // If we previously had a frame, we should still have one now.
+ EXPECT_EQ(had_current_frame, !!compositor_->GetCurrentFrame());
+ message_loop.RunUntilIdle();
+ }
+
+ void RenderFrame() {
+ compositor()->GetCurrentFrame();
+ compositor()->PutCurrentFrame();
+ }
+
+ base::MessageLoop message_loop;
+ base::SimpleTestTickClock* tick_clock_; // Owned by |compositor_|
scoped_ptr<VideoFrameCompositor> compositor_;
+
int did_receive_frame_count_;
int natural_size_changed_count_;
gfx::Size natural_size_;
@@ -70,12 +129,12 @@ TEST_F(VideoFrameCompositorTest, InitialValues) {
EXPECT_FALSE(compositor()->GetCurrentFrame().get());
}
-TEST_F(VideoFrameCompositorTest, UpdateCurrentFrame) {
+TEST_F(VideoFrameCompositorTest, PaintFrameUsingOldRenderingPath) {
scoped_refptr<VideoFrame> expected = VideoFrame::CreateEOSFrame();
// Should notify compositor synchronously.
EXPECT_EQ(0, did_receive_frame_count());
- compositor()->UpdateCurrentFrame(expected);
+ compositor()->PaintFrameUsingOldRenderingPath(expected);
scoped_refptr<VideoFrame> actual = compositor()->GetCurrentFrame();
EXPECT_EQ(expected, actual);
EXPECT_EQ(1, did_receive_frame_count());
@@ -90,71 +149,260 @@ TEST_F(VideoFrameCompositorTest, NaturalSizeChanged) {
scoped_refptr<VideoFrame> larger_frame =
VideoFrame::CreateBlackFrame(larger_size);
+ gfx::Size empty_size(0, 0);
+
// Initial expectations.
- EXPECT_EQ(0, natural_size().width());
- EXPECT_EQ(0, natural_size().height());
+ EXPECT_EQ(empty_size, natural_size());
EXPECT_EQ(0, natural_size_changed_count());
// Callback isn't fired for the first frame.
- compositor()->UpdateCurrentFrame(initial_frame);
- EXPECT_EQ(0, natural_size().width());
- EXPECT_EQ(0, natural_size().height());
+ compositor()->PaintFrameUsingOldRenderingPath(initial_frame);
+ EXPECT_EQ(empty_size, natural_size());
EXPECT_EQ(0, natural_size_changed_count());
// Callback should be fired once.
- compositor()->UpdateCurrentFrame(larger_frame);
- EXPECT_EQ(larger_size.width(), natural_size().width());
- EXPECT_EQ(larger_size.height(), natural_size().height());
+ compositor()->PaintFrameUsingOldRenderingPath(larger_frame);
+ EXPECT_EQ(larger_size, natural_size());
EXPECT_EQ(1, natural_size_changed_count());
- compositor()->UpdateCurrentFrame(larger_frame);
- EXPECT_EQ(larger_size.width(), natural_size().width());
- EXPECT_EQ(larger_size.height(), natural_size().height());
+ compositor()->PaintFrameUsingOldRenderingPath(larger_frame);
+ EXPECT_EQ(larger_size, natural_size());
EXPECT_EQ(1, natural_size_changed_count());
// Callback is fired once more when switching back to initial size.
- compositor()->UpdateCurrentFrame(initial_frame);
- EXPECT_EQ(initial_size.width(), natural_size().width());
- EXPECT_EQ(initial_size.height(), natural_size().height());
+ compositor()->PaintFrameUsingOldRenderingPath(initial_frame);
+ EXPECT_EQ(initial_size, natural_size());
+ EXPECT_EQ(2, natural_size_changed_count());
+
+ compositor()->PaintFrameUsingOldRenderingPath(initial_frame);
+ EXPECT_EQ(initial_size, natural_size());
EXPECT_EQ(2, natural_size_changed_count());
- compositor()->UpdateCurrentFrame(initial_frame);
- EXPECT_EQ(initial_size.width(), natural_size().width());
+ natural_size_changed_count_ = 0;
+ natural_size_ = empty_size;
+ compositor()->clear_current_frame_for_testing();
+
+ EXPECT_CALL(*this, Render(_, _, _))
+ .WillOnce(Return(initial_frame))
+ .WillOnce(Return(larger_frame))
+ .WillOnce(Return(initial_frame))
+ .WillOnce(Return(initial_frame));
+ StartVideoRendererSink();
+
+ // Starting the sink will issue one Render() call, ensure the callback isn't
+ // fired for the first frame.
+ EXPECT_EQ(0, natural_size_changed_count());
+ EXPECT_EQ(empty_size, natural_size());
+
+ // Once another frame is received with a different size it should fire.
+ EXPECT_TRUE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ RenderFrame();
+ EXPECT_EQ(larger_size, natural_size());
+ EXPECT_EQ(1, natural_size_changed_count());
+
+ EXPECT_TRUE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ RenderFrame();
EXPECT_EQ(initial_size, natural_size());
EXPECT_EQ(2, natural_size_changed_count());
+
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ EXPECT_EQ(initial_size, natural_size());
+ EXPECT_EQ(2, natural_size_changed_count());
+ RenderFrame();
+
+ StopVideoRendererSink(true);
}
TEST_F(VideoFrameCompositorTest, OpacityChanged) {
gfx::Size size(8, 8);
- gfx::Rect rect(gfx::Point(0, 0), size);
- scoped_refptr<VideoFrame> opaque_frame = VideoFrame::CreateFrame(
- VideoFrame::YV12, size, rect, size, base::TimeDelta());
+ scoped_refptr<VideoFrame> opaque_frame = CreateOpaqueFrame();
scoped_refptr<VideoFrame> not_opaque_frame = VideoFrame::CreateFrame(
- VideoFrame::YV12A, size, rect, size, base::TimeDelta());
+ VideoFrame::YV12A, size, gfx::Rect(size), size, base::TimeDelta());
// Initial expectations.
EXPECT_FALSE(opaque());
EXPECT_EQ(0, opacity_changed_count());
// Callback is fired for the first frame.
- compositor()->UpdateCurrentFrame(not_opaque_frame);
+ compositor()->PaintFrameUsingOldRenderingPath(not_opaque_frame);
EXPECT_FALSE(opaque());
EXPECT_EQ(1, opacity_changed_count());
// Callback shouldn't be first subsequent times with same opaqueness.
- compositor()->UpdateCurrentFrame(not_opaque_frame);
+ compositor()->PaintFrameUsingOldRenderingPath(not_opaque_frame);
EXPECT_FALSE(opaque());
EXPECT_EQ(1, opacity_changed_count());
// Callback is fired when using opacity changes.
- compositor()->UpdateCurrentFrame(opaque_frame);
+ compositor()->PaintFrameUsingOldRenderingPath(opaque_frame);
EXPECT_TRUE(opaque());
EXPECT_EQ(2, opacity_changed_count());
// Callback shouldn't be first subsequent times with same opaqueness.
- compositor()->UpdateCurrentFrame(opaque_frame);
+ compositor()->PaintFrameUsingOldRenderingPath(opaque_frame);
+ EXPECT_TRUE(opaque());
+ EXPECT_EQ(2, opacity_changed_count());
+
+ opacity_changed_count_ = 0;
+ compositor()->clear_current_frame_for_testing();
+
+ EXPECT_CALL(*this, Render(_, _, _))
+ .WillOnce(Return(not_opaque_frame))
+ .WillOnce(Return(not_opaque_frame))
+ .WillOnce(Return(opaque_frame))
+ .WillOnce(Return(opaque_frame));
+ StartVideoRendererSink();
+ EXPECT_FALSE(opaque());
+ EXPECT_EQ(1, opacity_changed_count());
+
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ RenderFrame();
+ EXPECT_FALSE(opaque());
+ EXPECT_EQ(1, opacity_changed_count());
+
+ EXPECT_TRUE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ RenderFrame();
EXPECT_TRUE(opaque());
EXPECT_EQ(2, opacity_changed_count());
+
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+ EXPECT_TRUE(opaque());
+ EXPECT_EQ(2, opacity_changed_count());
+ RenderFrame();
+
+ StopVideoRendererSink(true);
+}
+
+TEST_F(VideoFrameCompositorTest, VideoRendererSinkFrameDropped) {
+ scoped_refptr<VideoFrame> opaque_frame = CreateOpaqueFrame();
+
+ EXPECT_CALL(*this, Render(_, _, _)).WillRepeatedly(Return(opaque_frame));
+ StartVideoRendererSink();
+
+ // The first UpdateCurrentFrame() after a background render, which starting
+ // the sink does automatically, won't report a dropped frame.
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ // Another call should trigger a dropped frame callback.
+ EXPECT_CALL(*this, OnFrameDropped());
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ // Ensure it always happens until the frame is rendered.
+ EXPECT_CALL(*this, OnFrameDropped());
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ // Call GetCurrentFrame() but not PutCurrentFrame()
+ compositor()->GetCurrentFrame();
+
+ // The frame should still register as dropped until PutCurrentFrame is called.
+ EXPECT_CALL(*this, OnFrameDropped());
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ RenderFrame();
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ StopVideoRendererSink(true);
+}
+
+TEST_F(VideoFrameCompositorTest, VideoLayerShutdownWhileRendering) {
+ EXPECT_CALL(*this, Render(_, _, true)).WillOnce(Return(nullptr));
+ StartVideoRendererSink();
+ compositor_->SetVideoFrameProviderClient(nullptr);
+ StopVideoRendererSink(false);
+}
+
+TEST_F(VideoFrameCompositorTest, StartFiresBackgroundRender) {
+ scoped_refptr<VideoFrame> opaque_frame = CreateOpaqueFrame();
+ EXPECT_CALL(*this, Render(_, _, true)).WillRepeatedly(Return(opaque_frame));
+ StartVideoRendererSink();
+ StopVideoRendererSink(true);
+}
+
+TEST_F(VideoFrameCompositorTest, BackgroundRenderTicks) {
+ scoped_refptr<VideoFrame> opaque_frame = CreateOpaqueFrame();
+ compositor_->set_background_rendering_for_testing(true);
+
+ base::RunLoop run_loop;
+ EXPECT_CALL(*this, Render(_, _, true))
+ .WillOnce(Return(opaque_frame))
+ .WillOnce(
+ DoAll(RunClosure(run_loop.QuitClosure()), Return(opaque_frame)));
+ StartVideoRendererSink();
+ run_loop.Run();
+
+ // UpdateCurrentFrame() calls should indicate they are not synthetic.
+ EXPECT_CALL(*this, Render(_, _, false)).WillOnce(Return(opaque_frame));
+ EXPECT_FALSE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ // Background rendering should tick another render callback.
+ StopVideoRendererSink(true);
+}
+
+TEST_F(VideoFrameCompositorTest, GetCurrentFrameAndUpdateIfStale) {
+ scoped_refptr<VideoFrame> opaque_frame_1 = CreateOpaqueFrame();
+ scoped_refptr<VideoFrame> opaque_frame_2 = CreateOpaqueFrame();
+ compositor_->set_background_rendering_for_testing(true);
+
+ // |current_frame_| should be null at this point since we don't have a client
+ // or a callback.
+ ASSERT_FALSE(compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ // Starting the video renderer should return a single frame.
+ EXPECT_CALL(*this, Render(_, _, true)).WillOnce(Return(opaque_frame_1));
+ StartVideoRendererSink();
+
+ // Since we have a client, this call should not call background render, even
+ // if a lot of time has elapsed between calls.
+ tick_clock_->Advance(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(opaque_frame_1, compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ // An update current frame call should stop background rendering.
+ EXPECT_CALL(*this, Render(_, _, false)).WillOnce(Return(opaque_frame_2));
+ EXPECT_TRUE(
+ compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
+
+ // This call should still not call background render.
+ ASSERT_EQ(opaque_frame_2, compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ testing::Mock::VerifyAndClearExpectations(this);
+
+ // Clear our client, which means no mock function calls for Client.
+ compositor()->SetVideoFrameProviderClient(nullptr);
+
+ // This call should still not call background render, because we aren't in the
+ // background rendering state yet.
+ ASSERT_EQ(opaque_frame_2, compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ // Wait for background rendering to tick again.
+ base::RunLoop run_loop;
+ EXPECT_CALL(*this, Render(_, _, true))
+ .WillOnce(
+ DoAll(RunClosure(run_loop.QuitClosure()), Return(opaque_frame_1)))
+ .WillOnce(Return(opaque_frame_2));
+ run_loop.Run();
+
+ // This call should still not call background render, because not enough time
+ // has elapsed since the last background render call.
+ ASSERT_EQ(opaque_frame_1, compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ // Advancing the tick clock should allow a new frame to be requested.
+ tick_clock_->Advance(base::TimeDelta::FromMilliseconds(10));
+ ASSERT_EQ(opaque_frame_2, compositor()->GetCurrentFrameAndUpdateIfStale());
+
+ // Background rendering should tick another render callback.
+ StopVideoRendererSink(false);
}
} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmodule_impl.cc b/chromium/media/blink/webcontentdecryptionmodule_impl.cc
new file mode 100644
index 00000000000..cc2d50a8c49
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmodule_impl.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "webcontentdecryptionmodule_impl.h"
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_keys.h"
+#include "media/blink/cdm_result_promise.h"
+#include "media/blink/cdm_session_adapter.h"
+#include "media/blink/webcontentdecryptionmodulesession_impl.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
+#include "url/gurl.h"
+
+namespace media {
+
+void WebContentDecryptionModuleImpl::Create(
+ media::CdmFactory* cdm_factory,
+ const base::string16& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(!security_origin.isNull());
+ DCHECK(!key_system.empty());
+
+ // TODO(ddorwin): Guard against this in supported types check and remove this.
+ // Chromium only supports ASCII key systems.
+ if (!base::IsStringASCII(key_system)) {
+ NOTREACHED();
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ "Invalid keysystem.");
+ return;
+ }
+
+ // TODO(ddorwin): This should be a DCHECK.
+ std::string key_system_ascii = base::UTF16ToASCII(key_system);
+ if (!media::IsSupportedKeySystem(key_system_ascii)) {
+ std::string message =
+ "Keysystem '" + key_system_ascii + "' is not supported.";
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ blink::WebString::fromUTF8(message));
+ return;
+ }
+
+ // If unique security origin, don't try to create the CDM.
+ if (security_origin.isUnique() || security_origin.toString() == "null") {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ "CDM use not allowed for unique security origin.");
+ return;
+ }
+
+ GURL security_origin_as_gurl(security_origin.toString());
+
+ // CdmSessionAdapter::CreateCdm() will keep a reference to |adapter|. Then
+ // if WebContentDecryptionModuleImpl is successfully created (returned in
+ // |result|), it will keep a reference to |adapter|. Otherwise, |adapter| will
+ // be destructed.
+ scoped_refptr<CdmSessionAdapter> adapter(new CdmSessionAdapter());
+ adapter->CreateCdm(cdm_factory, key_system_ascii, security_origin_as_gurl,
+ cdm_config, result);
+}
+
+WebContentDecryptionModuleImpl::WebContentDecryptionModuleImpl(
+ scoped_refptr<CdmSessionAdapter> adapter)
+ : adapter_(adapter) {
+}
+
+WebContentDecryptionModuleImpl::~WebContentDecryptionModuleImpl() {
+}
+
+// The caller owns the created session.
+blink::WebContentDecryptionModuleSession*
+WebContentDecryptionModuleImpl::createSession() {
+ return adapter_->CreateSession();
+}
+
+void WebContentDecryptionModuleImpl::setServerCertificate(
+ const uint8* server_certificate,
+ size_t server_certificate_length,
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(server_certificate);
+ adapter_->SetServerCertificate(
+ std::vector<uint8>(server_certificate,
+ server_certificate + server_certificate_length),
+ scoped_ptr<SimpleCdmPromise>(
+ new CdmResultPromise<>(result, std::string())));
+}
+
+CdmContext* WebContentDecryptionModuleImpl::GetCdmContext() {
+ return adapter_->GetCdmContext();
+}
+
+} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmodule_impl.h b/chromium/media/blink/webcontentdecryptionmodule_impl.h
new file mode 100644
index 00000000000..fefc1db6171
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmodule_impl.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULE_IMPL_H_
+#define MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULE_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string16.h"
+#include "media/base/media_export.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
+
+namespace blink {
+#if defined(ENABLE_PEPPER_CDMS)
+class WebLocalFrame;
+#endif
+class WebSecurityOrigin;
+}
+
+namespace media {
+
+struct CdmConfig;
+class CdmContext;
+class CdmFactory;
+class CdmSessionAdapter;
+class WebContentDecryptionModuleSessionImpl;
+
+class MEDIA_EXPORT WebContentDecryptionModuleImpl
+ : public blink::WebContentDecryptionModule {
+ public:
+ static void Create(CdmFactory* cdm_factory,
+ const base::string16& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result);
+
+ virtual ~WebContentDecryptionModuleImpl();
+
+ // blink::WebContentDecryptionModule implementation.
+ virtual blink::WebContentDecryptionModuleSession* createSession();
+
+ virtual void setServerCertificate(
+ const uint8* server_certificate,
+ size_t server_certificate_length,
+ blink::WebContentDecryptionModuleResult result);
+
+ // Returns the CdmContext associated with this CDM, which must not be nullptr.
+ // TODO(jrummell): Figure out lifetimes, as WMPI may still use the decryptor
+ // after WebContentDecryptionModule is freed. http://crbug.com/330324
+ CdmContext* GetCdmContext();
+
+ private:
+ friend CdmSessionAdapter;
+
+ // Takes reference to |adapter|.
+ WebContentDecryptionModuleImpl(scoped_refptr<CdmSessionAdapter> adapter);
+
+ scoped_refptr<CdmSessionAdapter> adapter_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebContentDecryptionModuleImpl);
+};
+
+// Allow typecasting from blink type as this is the only implementation.
+inline WebContentDecryptionModuleImpl* ToWebContentDecryptionModuleImpl(
+ blink::WebContentDecryptionModule* cdm) {
+ return static_cast<WebContentDecryptionModuleImpl*>(cdm);
+}
+
+} // namespace media
+
+#endif // MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULE_IMPL_H_
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
new file mode 100644
index 00000000000..01f14ed4c14
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/blink/webcontentdecryptionmoduleaccess_impl.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+#include "media/blink/webencryptedmediaclient_impl.h"
+
+namespace media {
+
+// The caller owns the created cdm (passed back using |result|).
+static void CreateCdm(const base::WeakPtr<WebEncryptedMediaClientImpl>& client,
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result) {
+ // If |client| is gone (due to the frame getting destroyed), it is
+ // impossible to create the CDM, so fail.
+ if (!client) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionInvalidStateError, 0,
+ "Failed to create CDM.");
+ return;
+ }
+
+ client->CreateCdm(key_system, security_origin, cdm_config, result);
+}
+
+WebContentDecryptionModuleAccessImpl*
+WebContentDecryptionModuleAccessImpl::Create(
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const blink::WebMediaKeySystemConfiguration& configuration,
+ const CdmConfig& cdm_config,
+ const base::WeakPtr<WebEncryptedMediaClientImpl>& client) {
+ return new WebContentDecryptionModuleAccessImpl(
+ key_system, security_origin, configuration, cdm_config, client);
+}
+
+WebContentDecryptionModuleAccessImpl::WebContentDecryptionModuleAccessImpl(
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const blink::WebMediaKeySystemConfiguration& configuration,
+ const CdmConfig& cdm_config,
+ const base::WeakPtr<WebEncryptedMediaClientImpl>& client)
+ : key_system_(key_system),
+ security_origin_(security_origin),
+ configuration_(configuration),
+ cdm_config_(cdm_config),
+ client_(client) {
+}
+
+WebContentDecryptionModuleAccessImpl::~WebContentDecryptionModuleAccessImpl() {
+}
+
+blink::WebMediaKeySystemConfiguration
+WebContentDecryptionModuleAccessImpl::getConfiguration() {
+ return configuration_;
+}
+
+void WebContentDecryptionModuleAccessImpl::createContentDecryptionModule(
+ blink::WebContentDecryptionModuleResult result) {
+ // This method needs to run asynchronously, as it may need to load the CDM.
+ // As this object's lifetime is controlled by MediaKeySystemAccess on the
+ // blink side, copy all values needed by CreateCdm() in case the blink object
+ // gets garbage-collected.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&CreateCdm, client_, key_system_, security_origin_,
+ cdm_config_, result));
+}
+
+} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
new file mode 100644
index 00000000000..5f156984e19
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULEACCESS_IMPL_H_
+#define MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULEACCESS_IMPL_H_
+
+#include "base/memory/weak_ptr.h"
+#include "media/base/cdm_config.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleAccess.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
+#include "third_party/WebKit/public/platform/WebMediaKeySystemConfiguration.h"
+#include "third_party/WebKit/public/platform/WebSecurityOrigin.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+namespace media {
+
+class WebEncryptedMediaClientImpl;
+
+class WebContentDecryptionModuleAccessImpl
+ : public blink::WebContentDecryptionModuleAccess {
+ public:
+ static WebContentDecryptionModuleAccessImpl* Create(
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const blink::WebMediaKeySystemConfiguration& configuration,
+ const CdmConfig& cdm_config,
+ const base::WeakPtr<WebEncryptedMediaClientImpl>& client);
+ virtual ~WebContentDecryptionModuleAccessImpl();
+
+ // blink::WebContentDecryptionModuleAccess interface.
+ virtual blink::WebMediaKeySystemConfiguration getConfiguration();
+ virtual void createContentDecryptionModule(
+ blink::WebContentDecryptionModuleResult result);
+
+ private:
+ WebContentDecryptionModuleAccessImpl(
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const blink::WebMediaKeySystemConfiguration& configuration,
+ const CdmConfig& cdm_config,
+ const base::WeakPtr<WebEncryptedMediaClientImpl>& client);
+
+ const blink::WebString key_system_;
+ const blink::WebSecurityOrigin security_origin_;
+ const blink::WebMediaKeySystemConfiguration configuration_;
+ const CdmConfig cdm_config_;
+
+ // Keep a WeakPtr as client is owned by render_frame_impl.
+ base::WeakPtr<WebEncryptedMediaClientImpl> client_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebContentDecryptionModuleAccessImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULEACCESS_IMPL_H_
diff --git a/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
new file mode 100644
index 00000000000..c80a1d52d55
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
@@ -0,0 +1,417 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "webcontentdecryptionmodulesession_impl.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/cdm_key_information.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/key_systems.h"
+#include "media/base/limits.h"
+#include "media/base/media_keys.h"
+#include "media/blink/cdm_result_promise.h"
+#include "media/blink/cdm_session_adapter.h"
+#include "media/blink/new_session_cdm_result_promise.h"
+#include "media/blink/webmediaplayer_util.h"
+#include "media/cdm/cenc_utils.h"
+#include "media/cdm/json_web_key.h"
+#include "media/cdm/key_system_names.h"
+#include "third_party/WebKit/public/platform/WebData.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaKeyInformation.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
+#include "third_party/WebKit/public/platform/WebVector.h"
+
+namespace media {
+
+const char kCloseSessionUMAName[] = "CloseSession";
+const char kGenerateRequestUMAName[] = "GenerateRequest";
+const char kLoadSessionUMAName[] = "LoadSession";
+const char kRemoveSessionUMAName[] = "RemoveSession";
+const char kUpdateSessionUMAName[] = "UpdateSession";
+
+static blink::WebContentDecryptionModuleSession::Client::MessageType
+convertMessageType(MediaKeys::MessageType message_type) {
+ switch (message_type) {
+ case media::MediaKeys::LICENSE_REQUEST:
+ return blink::WebContentDecryptionModuleSession::Client::MessageType::
+ LicenseRequest;
+ case media::MediaKeys::LICENSE_RENEWAL:
+ return blink::WebContentDecryptionModuleSession::Client::MessageType::
+ LicenseRenewal;
+ case media::MediaKeys::LICENSE_RELEASE:
+ return blink::WebContentDecryptionModuleSession::Client::MessageType::
+ LicenseRelease;
+ }
+
+ NOTREACHED();
+ return blink::WebContentDecryptionModuleSession::Client::MessageType::
+ LicenseRequest;
+}
+
+static blink::WebEncryptedMediaKeyInformation::KeyStatus convertStatus(
+ media::CdmKeyInformation::KeyStatus status) {
+ switch (status) {
+ case media::CdmKeyInformation::USABLE:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::Usable;
+ case media::CdmKeyInformation::INTERNAL_ERROR:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::InternalError;
+ case media::CdmKeyInformation::EXPIRED:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::Expired;
+ case media::CdmKeyInformation::OUTPUT_NOT_ALLOWED:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::
+ OutputNotAllowed;
+ case media::CdmKeyInformation::OUTPUT_DOWNSCALED:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::
+ OutputDownscaled;
+ case media::CdmKeyInformation::KEY_STATUS_PENDING:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::StatusPending;
+ }
+
+ NOTREACHED();
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::InternalError;
+}
+
+static MediaKeys::SessionType convertSessionType(
+ blink::WebEncryptedMediaSessionType session_type) {
+ switch (session_type) {
+ case blink::WebEncryptedMediaSessionType::Temporary:
+ return MediaKeys::TEMPORARY_SESSION;
+ case blink::WebEncryptedMediaSessionType::PersistentLicense:
+ return MediaKeys::PERSISTENT_LICENSE_SESSION;
+ case blink::WebEncryptedMediaSessionType::PersistentReleaseMessage:
+ return MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION;
+ case blink::WebEncryptedMediaSessionType::Unknown:
+ break;
+ }
+
+ NOTREACHED();
+ return MediaKeys::TEMPORARY_SESSION;
+}
+
+static bool SanitizeInitData(EmeInitDataType init_data_type,
+ const unsigned char* init_data,
+ size_t init_data_length,
+ std::vector<uint8>* sanitized_init_data,
+ std::string* error_message) {
+ if (init_data_length > limits::kMaxInitDataLength) {
+ error_message->assign("Initialization data too long.");
+ return false;
+ }
+
+ switch (init_data_type) {
+ case EmeInitDataType::WEBM:
+ sanitized_init_data->assign(init_data, init_data + init_data_length);
+ return true;
+
+ case EmeInitDataType::CENC:
+ sanitized_init_data->assign(init_data, init_data + init_data_length);
+ if (!ValidatePsshInput(*sanitized_init_data)) {
+ error_message->assign("Initialization data for CENC is incorrect.");
+ return false;
+ }
+ return true;
+
+ case EmeInitDataType::KEYIDS: {
+ // Extract the keys and then rebuild the message. This ensures that any
+ // extra data in the provided JSON is dropped.
+ std::string init_data_string(init_data, init_data + init_data_length);
+ KeyIdList key_ids;
+ if (!ExtractKeyIdsFromKeyIdsInitData(init_data_string, &key_ids,
+ error_message))
+ return false;
+
+ for (const auto& key_id : key_ids) {
+ if (key_id.size() < limits::kMinKeyIdLength ||
+ key_id.size() > limits::kMaxKeyIdLength) {
+ error_message->assign("Incorrect key size.");
+ return false;
+ }
+ }
+
+ CreateKeyIdsInitData(key_ids, sanitized_init_data);
+ return true;
+ }
+
+ case EmeInitDataType::UNKNOWN:
+ break;
+ }
+
+ NOTREACHED();
+ error_message->assign("Initialization data type is not supported.");
+ return false;
+}
+
+static bool SanitizeSessionId(const blink::WebString& session_id,
+ std::string* sanitized_session_id) {
+ // The user agent should thoroughly validate the sessionId value before
+ // passing it to the CDM. At a minimum, this should include checking that
+ // the length and value (e.g. alphanumeric) are reasonable.
+ if (!base::IsStringASCII(session_id))
+ return false;
+
+ sanitized_session_id->assign(base::UTF16ToASCII(session_id));
+ if (sanitized_session_id->length() > limits::kMaxSessionIdLength)
+ return false;
+
+ for (const char c : *sanitized_session_id) {
+ if (!IsAsciiAlpha(c) && !IsAsciiDigit(c))
+ return false;
+ }
+
+ return true;
+}
+
+static bool SanitizeResponse(const std::string& key_system,
+ const uint8* response,
+ size_t response_length,
+ std::vector<uint8>* sanitized_response) {
+ // The user agent should thoroughly validate the response before passing it
+ // to the CDM. This may include verifying values are within reasonable limits,
+ // stripping irrelevant data or fields, pre-parsing it, sanitizing it,
+ // and/or generating a fully sanitized version. The user agent should check
+ // that the length and values of fields are reasonable. Unknown fields should
+ // be rejected or removed.
+ if (response_length > limits::kMaxSessionResponseLength)
+ return false;
+
+ if (IsClearKey(key_system) || IsExternalClearKey(key_system)) {
+ std::string key_string(response, response + response_length);
+ KeyIdAndKeyPairs keys;
+ MediaKeys::SessionType session_type = MediaKeys::TEMPORARY_SESSION;
+ if (!ExtractKeysFromJWKSet(key_string, &keys, &session_type))
+ return false;
+
+ // Must contain at least one key.
+ if (keys.empty())
+ return false;
+
+ for (const auto key_pair : keys) {
+ if (key_pair.first.size() < limits::kMinKeyIdLength ||
+ key_pair.first.size() > limits::kMaxKeyIdLength) {
+ return false;
+ }
+ }
+
+ std::string sanitized_data = GenerateJWKSet(keys, session_type);
+ sanitized_response->assign(sanitized_data.begin(), sanitized_data.end());
+ return true;
+ }
+
+ // TODO(jrummell): Verify responses for Widevine.
+ sanitized_response->assign(response, response + response_length);
+ return true;
+}
+
+WebContentDecryptionModuleSessionImpl::WebContentDecryptionModuleSessionImpl(
+ const scoped_refptr<CdmSessionAdapter>& adapter)
+ : adapter_(adapter), is_closed_(false), weak_ptr_factory_(this) {
+}
+
+WebContentDecryptionModuleSessionImpl::
+ ~WebContentDecryptionModuleSessionImpl() {
+ if (!session_id_.empty())
+ adapter_->UnregisterSession(session_id_);
+}
+
+void WebContentDecryptionModuleSessionImpl::setClientInterface(Client* client) {
+ client_ = client;
+}
+
+blink::WebString WebContentDecryptionModuleSessionImpl::sessionId() const {
+ return blink::WebString::fromUTF8(session_id_);
+}
+
+void WebContentDecryptionModuleSessionImpl::initializeNewSession(
+ blink::WebEncryptedMediaInitDataType init_data_type,
+ const unsigned char* init_data,
+ size_t init_data_length,
+ blink::WebEncryptedMediaSessionType session_type,
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(init_data);
+ DCHECK(session_id_.empty());
+
+ // From https://w3c.github.io/encrypted-media/#generateRequest.
+ // 5. If the Key System implementation represented by this object's cdm
+ // implementation value does not support initDataType as an Initialization
+ // Data Type, return a promise rejected with a new DOMException whose name
+ // is NotSupportedError. String comparison is case-sensitive.
+ EmeInitDataType eme_init_data_type = ConvertToEmeInitDataType(init_data_type);
+ if (!IsSupportedKeySystemWithInitDataType(adapter_->GetKeySystem(),
+ eme_init_data_type)) {
+ std::string message =
+ "The initialization data type is not supported by the key system.";
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ blink::WebString::fromUTF8(message));
+ return;
+ }
+
+ // 9.1 If the init data is not valid for initDataType, reject promise with a
+ // new DOMException whose name is InvalidAccessError.
+ // 9.2 Let sanitized init data be a validated and sanitized version of init
+ // data. The user agent must thoroughly validate the Initialization Data
+ // before passing it to the CDM. This includes verifying that the length
+ // and values of fields are reasonable, verifying that values are within
+ // reasonable limits, and stripping irrelevant, unsupported, or unknown
+ // data or fields. It is recommended that user agents pre-parse, sanitize,
+ // and/or generate a fully sanitized version of the Initialization Data.
+ // If the Initialization Data format specified by initDataType support
+ // multiple entries, the user agent should remove entries that are not
+ // needed by the CDM.
+ // 9.3 If the previous step failed, reject promise with a new DOMException
+ // whose name is InvalidAccessError.
+ std::vector<uint8> sanitized_init_data;
+ std::string message;
+ if (!SanitizeInitData(eme_init_data_type, init_data, init_data_length,
+ &sanitized_init_data, &message)) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionInvalidAccessError, 0,
+ blink::WebString::fromUTF8(message));
+ return;
+ }
+
+ // 9.4 Let session id be the empty string.
+ // (Done in constructor.)
+
+ // 9.5 Let message be null.
+ // (Done by CDM.)
+
+ // 9.6 Let cdm be the CDM instance represented by this object's cdm
+ // instance value.
+ // 9.7 Use the cdm to execute the following steps:
+ adapter_->InitializeNewSession(
+ eme_init_data_type, sanitized_init_data, convertSessionType(session_type),
+ scoped_ptr<NewSessionCdmPromise>(new NewSessionCdmResultPromise(
+ result, adapter_->GetKeySystemUMAPrefix() + kGenerateRequestUMAName,
+ base::Bind(
+ &WebContentDecryptionModuleSessionImpl::OnSessionInitialized,
+ base::Unretained(this)))));
+}
+
+void WebContentDecryptionModuleSessionImpl::load(
+ const blink::WebString& session_id,
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(!session_id.isEmpty());
+ DCHECK(session_id_.empty());
+
+ std::string sanitized_session_id;
+ if (!SanitizeSessionId(session_id, &sanitized_session_id)) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionInvalidAccessError, 0,
+ "Invalid session ID.");
+ return;
+ }
+
+ // TODO(jrummell): Now that there are 2 types of persistent sessions, the
+ // session type should be passed from blink. Type should also be passed in the
+ // constructor (and removed from initializeNewSession()).
+ adapter_->LoadSession(
+ MediaKeys::PERSISTENT_LICENSE_SESSION, sanitized_session_id,
+ scoped_ptr<NewSessionCdmPromise>(new NewSessionCdmResultPromise(
+ result, adapter_->GetKeySystemUMAPrefix() + kLoadSessionUMAName,
+ base::Bind(
+ &WebContentDecryptionModuleSessionImpl::OnSessionInitialized,
+ base::Unretained(this)))));
+}
+
+void WebContentDecryptionModuleSessionImpl::update(
+ const uint8* response,
+ size_t response_length,
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(response);
+ DCHECK(!session_id_.empty());
+
+ std::vector<uint8> sanitized_response;
+ if (!SanitizeResponse(adapter_->GetKeySystem(), response, response_length,
+ &sanitized_response)) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionInvalidAccessError, 0,
+ "Invalid response.");
+ return;
+ }
+
+ adapter_->UpdateSession(
+ session_id_, sanitized_response,
+ scoped_ptr<SimpleCdmPromise>(new CdmResultPromise<>(
+ result, adapter_->GetKeySystemUMAPrefix() + kUpdateSessionUMAName)));
+}
+
+void WebContentDecryptionModuleSessionImpl::close(
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(!session_id_.empty());
+ adapter_->CloseSession(
+ session_id_,
+ scoped_ptr<SimpleCdmPromise>(new CdmResultPromise<>(
+ result, adapter_->GetKeySystemUMAPrefix() + kCloseSessionUMAName)));
+}
+
+void WebContentDecryptionModuleSessionImpl::remove(
+ blink::WebContentDecryptionModuleResult result) {
+ DCHECK(!session_id_.empty());
+ adapter_->RemoveSession(
+ session_id_,
+ scoped_ptr<SimpleCdmPromise>(new CdmResultPromise<>(
+ result, adapter_->GetKeySystemUMAPrefix() + kRemoveSessionUMAName)));
+}
+
+void WebContentDecryptionModuleSessionImpl::OnSessionMessage(
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8>& message) {
+ DCHECK(client_) << "Client not set before message event";
+ client_->message(convertMessageType(message_type), vector_as_array(&message),
+ message.size());
+}
+
+void WebContentDecryptionModuleSessionImpl::OnSessionKeysChange(
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ blink::WebVector<blink::WebEncryptedMediaKeyInformation> keys(
+ keys_info.size());
+ for (size_t i = 0; i < keys_info.size(); ++i) {
+ const auto& key_info = keys_info[i];
+ keys[i].setId(blink::WebData(reinterpret_cast<char*>(&key_info->key_id[0]),
+ key_info->key_id.size()));
+ keys[i].setStatus(convertStatus(key_info->status));
+ keys[i].setSystemCode(key_info->system_code);
+ }
+
+ // Now send the event to blink.
+ client_->keysStatusesChange(keys, has_additional_usable_key);
+}
+
+void WebContentDecryptionModuleSessionImpl::OnSessionExpirationUpdate(
+ const base::Time& new_expiry_time) {
+ client_->expirationChanged(new_expiry_time.ToJsTime());
+}
+
+void WebContentDecryptionModuleSessionImpl::OnSessionClosed() {
+ if (is_closed_)
+ return;
+
+ is_closed_ = true;
+ client_->close();
+}
+
+blink::WebContentDecryptionModuleResult::SessionStatus
+WebContentDecryptionModuleSessionImpl::OnSessionInitialized(
+ const std::string& session_id) {
+ // CDM will return NULL if the session to be loaded can't be found.
+ if (session_id.empty())
+ return blink::WebContentDecryptionModuleResult::SessionNotFound;
+
+ DCHECK(session_id_.empty()) << "Session ID may not be changed once set.";
+ session_id_ = session_id;
+ return adapter_->RegisterSession(session_id_, weak_ptr_factory_.GetWeakPtr())
+ ? blink::WebContentDecryptionModuleResult::NewSession
+ : blink::WebContentDecryptionModuleResult::SessionAlreadyExists;
+}
+
+} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmodulesession_impl.h b/chromium/media/blink/webcontentdecryptionmodulesession_impl.h
new file mode 100644
index 00000000000..2f00ff5b368
--- /dev/null
+++ b/chromium/media/blink/webcontentdecryptionmodulesession_impl.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULESESSION_IMPL_H_
+#define MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULESESSION_IMPL_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_keys.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleSession.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+namespace media {
+
+class CdmSessionAdapter;
+class MediaKeys;
+
+class WebContentDecryptionModuleSessionImpl
+ : public blink::WebContentDecryptionModuleSession {
+ public:
+ WebContentDecryptionModuleSessionImpl(
+ const scoped_refptr<CdmSessionAdapter>& adapter);
+ virtual ~WebContentDecryptionModuleSessionImpl();
+
+ // blink::WebContentDecryptionModuleSession implementation.
+ virtual void setClientInterface(Client* client);
+ virtual blink::WebString sessionId() const;
+
+ virtual void initializeNewSession(
+ blink::WebEncryptedMediaInitDataType init_data_type,
+ const unsigned char* initData,
+ size_t initDataLength,
+ blink::WebEncryptedMediaSessionType session_type,
+ blink::WebContentDecryptionModuleResult result);
+ virtual void load(const blink::WebString& session_id,
+ blink::WebContentDecryptionModuleResult result);
+ virtual void update(const uint8* response,
+ size_t response_length,
+ blink::WebContentDecryptionModuleResult result);
+ virtual void close(blink::WebContentDecryptionModuleResult result);
+ virtual void remove(blink::WebContentDecryptionModuleResult result);
+
+ // Callbacks.
+ void OnSessionMessage(MediaKeys::MessageType message_type,
+ const std::vector<uint8>& message);
+ void OnSessionKeysChange(bool has_additional_usable_key,
+ CdmKeysInfo keys_info);
+ void OnSessionExpirationUpdate(const base::Time& new_expiry_time);
+ void OnSessionClosed();
+
+ private:
+ // Called when a new session is created.
+ blink::WebContentDecryptionModuleResult::SessionStatus OnSessionInitialized(
+ const std::string& session_id);
+
+ scoped_refptr<CdmSessionAdapter> adapter_;
+
+ // Non-owned pointer.
+ Client* client_;
+
+ // Session ID is the app visible ID for this session generated by the CDM.
+ // This value is not set until the CDM resolves the initializeNewSession()
+ // promise.
+ std::string session_id_;
+
+ // Don't pass more than 1 close() event to blink::
+ // TODO(jrummell): Remove this once blink tests handle close() promise and
+ // closed() event.
+ bool is_closed_;
+
+ // Since promises will live until they are fired, use a weak reference when
+ // creating a promise in case this class disappears before the promise
+ // actually fires.
+ base::WeakPtrFactory<WebContentDecryptionModuleSessionImpl> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebContentDecryptionModuleSessionImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_BLINK_WEBCONTENTDECRYPTIONMODULESESSION_IMPL_H_
diff --git a/chromium/media/blink/webencryptedmediaclient_impl.cc b/chromium/media/blink/webencryptedmediaclient_impl.cc
new file mode 100644
index 00000000000..1ef61af946b
--- /dev/null
+++ b/chromium/media/blink/webencryptedmediaclient_impl.cc
@@ -0,0 +1,151 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "webencryptedmediaclient_impl.h"
+
+#include "base/bind.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_permission.h"
+#include "media/blink/webcontentdecryptionmodule_impl.h"
+#include "media/blink/webcontentdecryptionmoduleaccess_impl.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaRequest.h"
+#include "third_party/WebKit/public/platform/WebMediaKeySystemConfiguration.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+namespace media {
+
+namespace {
+
+// Used to name UMAs in Reporter.
+const char kKeySystemSupportUMAPrefix[] =
+ "Media.EME.RequestMediaKeySystemAccess.";
+
+} // namespace
+
+// Report usage of key system to UMA. There are 2 different counts logged:
+// 1. The key system is requested.
+// 2. The requested key system and options are supported.
+// Each stat is only reported once per renderer frame per key system.
+// Note that WebEncryptedMediaClientImpl is only created once by each
+// renderer frame.
+class WebEncryptedMediaClientImpl::Reporter {
+ public:
+ enum KeySystemSupportStatus {
+ KEY_SYSTEM_REQUESTED = 0,
+ KEY_SYSTEM_SUPPORTED = 1,
+ KEY_SYSTEM_SUPPORT_STATUS_COUNT
+ };
+
+ explicit Reporter(const std::string& key_system_for_uma)
+ : uma_name_(kKeySystemSupportUMAPrefix + key_system_for_uma),
+ is_request_reported_(false),
+ is_support_reported_(false) {}
+ ~Reporter() {}
+
+ void ReportRequested() {
+ if (is_request_reported_)
+ return;
+ Report(KEY_SYSTEM_REQUESTED);
+ is_request_reported_ = true;
+ }
+
+ void ReportSupported() {
+ DCHECK(is_request_reported_);
+ if (is_support_reported_)
+ return;
+ Report(KEY_SYSTEM_SUPPORTED);
+ is_support_reported_ = true;
+ }
+
+ private:
+ void Report(KeySystemSupportStatus status) {
+ // Not using UMA_HISTOGRAM_ENUMERATION directly because UMA_* macros
+ // require the names to be constant throughout the process' lifetime.
+ base::LinearHistogram::FactoryGet(
+ uma_name_, 1, KEY_SYSTEM_SUPPORT_STATUS_COUNT,
+ KEY_SYSTEM_SUPPORT_STATUS_COUNT + 1,
+ base::Histogram::kUmaTargetedHistogramFlag)->Add(status);
+ }
+
+ const std::string uma_name_;
+ bool is_request_reported_;
+ bool is_support_reported_;
+};
+
+WebEncryptedMediaClientImpl::WebEncryptedMediaClientImpl(
+ base::Callback<bool(void)> are_secure_codecs_supported_cb,
+ CdmFactory* cdm_factory,
+ MediaPermission* media_permission)
+ : are_secure_codecs_supported_cb_(are_secure_codecs_supported_cb),
+ cdm_factory_(cdm_factory),
+ key_system_config_selector_(KeySystems::GetInstance(), media_permission),
+ weak_factory_(this) {
+ DCHECK(cdm_factory_);
+}
+
+WebEncryptedMediaClientImpl::~WebEncryptedMediaClientImpl() {
+}
+
+void WebEncryptedMediaClientImpl::requestMediaKeySystemAccess(
+ blink::WebEncryptedMediaRequest request) {
+ GetReporter(request.keySystem())->ReportRequested();
+ key_system_config_selector_.SelectConfig(
+ request.keySystem(), request.supportedConfigurations(),
+ request.securityOrigin(), are_secure_codecs_supported_cb_.Run(),
+ base::Bind(&WebEncryptedMediaClientImpl::OnRequestSucceeded,
+ weak_factory_.GetWeakPtr(), request),
+ base::Bind(&WebEncryptedMediaClientImpl::OnRequestNotSupported,
+ weak_factory_.GetWeakPtr(), request));
+}
+
+void WebEncryptedMediaClientImpl::CreateCdm(
+ const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result) {
+ WebContentDecryptionModuleImpl::Create(
+ cdm_factory_, key_system, security_origin, cdm_config, result);
+}
+
+void WebEncryptedMediaClientImpl::OnRequestSucceeded(
+ blink::WebEncryptedMediaRequest request,
+ const blink::WebMediaKeySystemConfiguration& accumulated_configuration,
+ const CdmConfig& cdm_config) {
+ GetReporter(request.keySystem())->ReportSupported();
+ // TODO(sandersd): Pass |are_secure_codecs_required| along and use it to
+ // configure the CDM security level and use of secure surfaces on Android.
+ request.requestSucceeded(WebContentDecryptionModuleAccessImpl::Create(
+ request.keySystem(), request.securityOrigin(), accumulated_configuration,
+ cdm_config, weak_factory_.GetWeakPtr()));
+}
+
+void WebEncryptedMediaClientImpl::OnRequestNotSupported(
+ blink::WebEncryptedMediaRequest request,
+ const blink::WebString& error_message) {
+ request.requestNotSupported(error_message);
+}
+
+WebEncryptedMediaClientImpl::Reporter* WebEncryptedMediaClientImpl::GetReporter(
+ const blink::WebString& key_system) {
+ // Assumes that empty will not be found by GetKeySystemNameForUMA().
+ // TODO(sandersd): Avoid doing ASCII conversion more than once.
+ std::string key_system_ascii;
+ if (base::IsStringASCII(key_system))
+ key_system_ascii = base::UTF16ToASCII(key_system);
+
+ // Return a per-frame singleton so that UMA reports will be once-per-frame.
+ std::string uma_name = GetKeySystemNameForUMA(key_system_ascii);
+ Reporter* reporter = reporters_.get(uma_name);
+ if (!reporter) {
+ reporter = new Reporter(uma_name);
+ reporters_.add(uma_name, make_scoped_ptr(reporter));
+ }
+ return reporter;
+}
+
+} // namespace media
diff --git a/chromium/media/blink/webencryptedmediaclient_impl.h b/chromium/media/blink/webencryptedmediaclient_impl.h
new file mode 100644
index 00000000000..d34f27252ec
--- /dev/null
+++ b/chromium/media/blink/webencryptedmediaclient_impl.h
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BLINK_WEBENCRYPTEDMEDIACLIENT_IMPL_H_
+#define MEDIA_BLINK_WEBENCRYPTEDMEDIACLIENT_IMPL_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_export.h"
+#include "media/blink/key_system_config_selector.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaClient.h"
+
+namespace blink {
+
+class WebContentDecryptionModuleResult;
+struct WebMediaKeySystemConfiguration;
+class WebSecurityOrigin;
+
+} // namespace blink
+
+namespace media {
+
+struct CdmConfig;
+class CdmFactory;
+class KeySystems;
+class MediaPermission;
+
+class MEDIA_EXPORT WebEncryptedMediaClientImpl
+ : public blink::WebEncryptedMediaClient {
+ public:
+ WebEncryptedMediaClientImpl(
+ base::Callback<bool(void)> are_secure_codecs_supported_cb,
+ CdmFactory* cdm_factory,
+ MediaPermission* media_permission);
+ virtual ~WebEncryptedMediaClientImpl();
+
+ // WebEncryptedMediaClient implementation.
+ virtual void requestMediaKeySystemAccess(
+ blink::WebEncryptedMediaRequest request);
+
+ // Create the CDM for |key_system| and |security_origin|. The caller owns
+ // the created cdm (passed back using |result|).
+ void CreateCdm(const blink::WebString& key_system,
+ const blink::WebSecurityOrigin& security_origin,
+ const CdmConfig& cdm_config,
+ blink::WebContentDecryptionModuleResult result);
+
+ private:
+ // Report usage of key system to UMA. There are 2 different counts logged:
+ // 1. The key system is requested.
+ // 2. The requested key system and options are supported.
+ // Each stat is only reported once per renderer frame per key system.
+ class Reporter;
+
+ // Complete a requestMediaKeySystemAccess() request with a supported
+ // accumulated configuration.
+ void OnRequestSucceeded(
+ blink::WebEncryptedMediaRequest request,
+ const blink::WebMediaKeySystemConfiguration& accumulated_configuration,
+ const CdmConfig& cdm_config);
+
+ // Complete a requestMediaKeySystemAccess() request with an error message.
+ void OnRequestNotSupported(blink::WebEncryptedMediaRequest request,
+ const blink::WebString& error_message);
+
+ // Gets the Reporter for |key_system|. If it doesn't already exist,
+ // create one.
+ Reporter* GetReporter(const blink::WebString& key_system);
+
+ // Reporter singletons.
+ base::ScopedPtrHashMap<std::string, scoped_ptr<Reporter>> reporters_;
+
+ base::Callback<bool(void)> are_secure_codecs_supported_cb_;
+ CdmFactory* cdm_factory_;
+ KeySystemConfigSelector key_system_config_selector_;
+ base::WeakPtrFactory<WebEncryptedMediaClientImpl> weak_factory_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BLINK_WEBENCRYPTEDMEDIACLIENT_IMPL_H_
diff --git a/chromium/media/blink/webinbandtexttrack_impl.cc b/chromium/media/blink/webinbandtexttrack_impl.cc
index 49a4880a4d7..720cd43f6e9 100644
--- a/chromium/media/blink/webinbandtexttrack_impl.cc
+++ b/chromium/media/blink/webinbandtexttrack_impl.cc
@@ -12,14 +12,12 @@ WebInbandTextTrackImpl::WebInbandTextTrackImpl(
Kind kind,
const blink::WebString& label,
const blink::WebString& language,
- const blink::WebString& id,
- int index)
+ const blink::WebString& id)
: client_(NULL),
kind_(kind),
label_(label),
language_(language),
- id_(id),
- index_(index) {
+ id_(id) {
}
WebInbandTextTrackImpl::~WebInbandTextTrackImpl() {
@@ -51,8 +49,4 @@ blink::WebString WebInbandTextTrackImpl::id() const {
return id_;
}
-int WebInbandTextTrackImpl::textTrackIndex() const {
- return index_;
-}
-
} // namespace media
diff --git a/chromium/media/blink/webinbandtexttrack_impl.h b/chromium/media/blink/webinbandtexttrack_impl.h
index c08bfad69c2..9c75caec6ad 100644
--- a/chromium/media/blink/webinbandtexttrack_impl.h
+++ b/chromium/media/blink/webinbandtexttrack_impl.h
@@ -15,8 +15,7 @@ class WebInbandTextTrackImpl : public blink::WebInbandTextTrack {
WebInbandTextTrackImpl(Kind kind,
const blink::WebString& label,
const blink::WebString& language,
- const blink::WebString& id,
- int index);
+ const blink::WebString& id);
virtual ~WebInbandTextTrackImpl();
virtual void setClient(blink::WebInbandTextTrackClient* client);
@@ -28,15 +27,12 @@ class WebInbandTextTrackImpl : public blink::WebInbandTextTrack {
virtual blink::WebString language() const;
virtual blink::WebString id() const;
- virtual int textTrackIndex() const;
-
private:
blink::WebInbandTextTrackClient* client_;
Kind kind_;
blink::WebString label_;
blink::WebString language_;
blink::WebString id_;
- int index_;
DISALLOW_COPY_AND_ASSIGN(WebInbandTextTrackImpl);
};
diff --git a/chromium/media/blink/webmediaplayer_impl.cc b/chromium/media/blink/webmediaplayer_impl.cc
index d5a411fcb31..4019de7c8a9 100644
--- a/chromium/media/blink/webmediaplayer_impl.cc
+++ b/chromium/media/blink/webmediaplayer_impl.cc
@@ -5,59 +5,46 @@
#include "media/blink/webmediaplayer_impl.h"
#include <algorithm>
+#include <cmath>
#include <limits>
-#include <string>
-#include <vector>
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/debug/alias.h"
#include "base/debug/crash_logging.h"
-#include "base/debug/trace_event.h"
-#include "base/float_util.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
#include "cc/blink/web_layer_impl.h"
#include "cc/layers/video_layer.h"
-#include "gpu/GLES2/gl2extchromium.h"
-#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "gpu/blink/webgraphicscontext3d_impl.h"
#include "media/audio/null_audio_sink.h"
-#include "media/base/audio_hardware_config.h"
#include "media/base/bind_to_current_loop.h"
+#include "media/base/cdm_context.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
-#include "media/base/pipeline.h"
#include "media/base/text_renderer.h"
#include "media/base/video_frame.h"
-#include "media/blink/buffered_data_source.h"
-#include "media/blink/encrypted_media_player_support.h"
#include "media/blink/texttrack_impl.h"
#include "media/blink/webaudiosourceprovider_impl.h"
+#include "media/blink/webcontentdecryptionmodule_impl.h"
#include "media/blink/webinbandtexttrack_impl.h"
#include "media/blink/webmediaplayer_delegate.h"
-#include "media/blink/webmediaplayer_params.h"
#include "media/blink/webmediaplayer_util.h"
#include "media/blink/webmediasource_impl.h"
-#include "media/filters/audio_renderer_impl.h"
#include "media/filters/chunk_demuxer.h"
-#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
-#include "media/filters/ffmpeg_video_decoder.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
-#include "media/filters/gpu_video_decoder.h"
-#include "media/filters/opus_audio_decoder.h"
-#include "media/filters/renderer_impl.h"
-#include "media/filters/video_renderer_impl.h"
-#include "media/filters/vpx_video_decoder.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaTypes.h"
#include "third_party/WebKit/public/platform/WebMediaSource.h"
#include "third_party/WebKit/public/platform/WebRect.h"
#include "third_party/WebKit/public/platform/WebSize.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/platform/WebURL.h"
#include "third_party/WebKit/public/web/WebLocalFrame.h"
+#include "third_party/WebKit/public/web/WebRuntimeFeatures.h"
#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
#include "third_party/WebKit/public/web/WebView.h"
@@ -88,37 +75,20 @@ namespace {
const double kMinRate = 0.0625;
const double kMaxRate = 16.0;
-class SyncPointClientImpl : public media::VideoFrame::SyncPointClient {
- public:
- explicit SyncPointClientImpl(
- blink::WebGraphicsContext3D* web_graphics_context)
- : web_graphics_context_(web_graphics_context) {}
- ~SyncPointClientImpl() override {}
- uint32 InsertSyncPoint() override {
- return web_graphics_context_->insertSyncPoint();
- }
- void WaitSyncPoint(uint32 sync_point) override {
- web_graphics_context_->waitSyncPoint(sync_point);
- }
-
- private:
- blink::WebGraphicsContext3D* web_graphics_context_;
-};
-
} // namespace
namespace media {
class BufferedDataSourceHostImpl;
-#define COMPILE_ASSERT_MATCHING_ENUM(name) \
- COMPILE_ASSERT(static_cast<int>(WebMediaPlayer::CORSMode ## name) == \
- static_cast<int>(BufferedResourceLoader::k ## name), \
- mismatching_enums)
-COMPILE_ASSERT_MATCHING_ENUM(Unspecified);
-COMPILE_ASSERT_MATCHING_ENUM(Anonymous);
-COMPILE_ASSERT_MATCHING_ENUM(UseCredentials);
-#undef COMPILE_ASSERT_MATCHING_ENUM
+#define STATIC_ASSERT_MATCHING_ENUM(name) \
+ static_assert(static_cast<int>(WebMediaPlayer::CORSMode ## name) == \
+ static_cast<int>(BufferedResourceLoader::k ## name), \
+ "mismatching enum values: " #name)
+STATIC_ASSERT_MATCHING_ENUM(Unspecified);
+STATIC_ASSERT_MATCHING_ENUM(Anonymous);
+STATIC_ASSERT_MATCHING_ENUM(UseCredentials);
+#undef STATIC_ASSERT_MATCHING_ENUM
#define BIND_TO_RENDER_LOOP(function) \
(DCHECK(main_task_runner_->BelongsToCurrentThread()), \
@@ -128,68 +98,62 @@ COMPILE_ASSERT_MATCHING_ENUM(UseCredentials);
(DCHECK(main_task_runner_->BelongsToCurrentThread()), \
BindToCurrentLoop(base::Bind(function, AsWeakPtr(), arg1)))
-static void LogMediaSourceError(const scoped_refptr<MediaLog>& media_log,
- const std::string& error) {
- media_log->AddEvent(media_log->CreateMediaSourceErrorEvent(error));
-}
-
WebMediaPlayerImpl::WebMediaPlayerImpl(
blink::WebLocalFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
- scoped_ptr<Renderer> renderer,
+ scoped_ptr<RendererFactory> renderer_factory,
+ CdmFactory* cdm_factory,
const WebMediaPlayerParams& params)
: frame_(frame),
network_state_(WebMediaPlayer::NetworkStateEmpty),
ready_state_(WebMediaPlayer::ReadyStateHaveNothing),
preload_(BufferedDataSource::AUTO),
- main_task_runner_(base::MessageLoopProxy::current()),
+ main_task_runner_(base::ThreadTaskRunnerHandle::Get()),
media_task_runner_(params.media_task_runner()),
media_log_(params.media_log()),
pipeline_(media_task_runner_, media_log_.get()),
load_type_(LoadTypeURL),
opaque_(false),
+ playback_rate_(0.0),
paused_(true),
seeking_(false),
- playback_rate_(0.0f),
ended_(false),
pending_seek_(false),
- pending_seek_seconds_(0.0f),
should_notify_time_changed_(false),
client_(client),
delegate_(delegate),
defer_load_cb_(params.defer_load_cb()),
- gpu_factories_(params.gpu_factories()),
+ context_3d_cb_(params.context_3d_cb()),
supports_save_(true),
chunk_demuxer_(NULL),
- compositor_task_runner_(params.compositor_task_runner()),
+ // Threaded compositing isn't enabled universally yet.
+ compositor_task_runner_(
+ params.compositor_task_runner()
+ ? params.compositor_task_runner()
+ : base::MessageLoop::current()->task_runner()),
compositor_(new VideoFrameCompositor(
+ compositor_task_runner_,
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnNaturalSizeChanged),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnOpacityChanged))),
- text_track_index_(0),
- encrypted_media_support_(
- params.CreateEncryptedMediaPlayerSupport(client)),
- audio_hardware_config_(params.audio_hardware_config()),
- renderer_(renderer.Pass()) {
- DCHECK(encrypted_media_support_);
-
- // Threaded compositing isn't enabled universally yet.
- if (!compositor_task_runner_.get())
- compositor_task_runner_ = base::MessageLoopProxy::current();
-
+ encrypted_media_support_(cdm_factory,
+ client,
+ params.media_permission(),
+ base::Bind(&WebMediaPlayerImpl::SetCdm,
+ AsWeakPtr(),
+ base::Bind(&IgnoreCdmAttached))),
+ renderer_factory_(renderer_factory.Pass()) {
media_log_->AddEvent(
media_log_->CreateEvent(MediaLogEvent::WEBMEDIAPLAYER_CREATED));
- // TODO(xhwang): When we use an external Renderer, many methods won't work,
- // e.g. GetCurrentFrameFromCompositor(). Fix this in a future CL.
- if (renderer_)
- return;
+ if (params.initial_cdm()) {
+ SetCdm(base::Bind(&IgnoreCdmAttached),
+ ToWebContentDecryptionModuleImpl(params.initial_cdm())
+ ->GetCdmContext());
+ }
- // |gpu_factories_| requires that its entry points be called on its
- // |GetTaskRunner()|. Since |pipeline_| will own decoders created from the
- // factories, require that their message loops are identical.
- DCHECK(!gpu_factories_.get() ||
- (gpu_factories_->GetTaskRunner() == media_task_runner_.get()));
+ // TODO(xhwang): When we use an external Renderer, many methods won't work,
+ // e.g. GetCurrentFrameFromCompositor(). See http://crbug.com/434861
// Use the null sink if no sink was provided.
audio_source_provider_ = new WebAudioSourceProviderImpl(
@@ -216,7 +180,7 @@ WebMediaPlayerImpl::~WebMediaPlayerImpl() {
chunk_demuxer_ = NULL;
}
- gpu_factories_ = NULL;
+ renderer_factory_.reset();
// Make sure to kill the pipeline so there's no more media threads running.
// Note: stopping the pipeline might block for a long time.
@@ -273,9 +237,9 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
media_log_.get(),
&buffered_data_source_host_,
base::Bind(&WebMediaPlayerImpl::NotifyDownloading, AsWeakPtr())));
+ data_source_->SetPreload(preload_);
data_source_->Initialize(
base::Bind(&WebMediaPlayerImpl::DataSourceInitialized, AsWeakPtr()));
- data_source_->SetPreload(preload_);
}
void WebMediaPlayerImpl::play() {
@@ -299,10 +263,10 @@ void WebMediaPlayerImpl::pause() {
const bool was_already_paused = paused_ || playback_rate_ == 0;
paused_ = true;
- pipeline_.SetPlaybackRate(0.0f);
+ pipeline_.SetPlaybackRate(0.0);
if (data_source_)
data_source_->MediaIsPaused();
- paused_time_ = pipeline_.GetMediaTime();
+ UpdatePausedTime();
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::PAUSE));
@@ -316,39 +280,70 @@ bool WebMediaPlayerImpl::supportsSave() const {
}
void WebMediaPlayerImpl::seek(double seconds) {
- DVLOG(1) << __FUNCTION__ << "(" << seconds << ")";
+ DVLOG(1) << __FUNCTION__ << "(" << seconds << "s)";
DCHECK(main_task_runner_->BelongsToCurrentThread());
ended_ = false;
+ ReadyState old_state = ready_state_;
if (ready_state_ > WebMediaPlayer::ReadyStateHaveMetadata)
SetReadyState(WebMediaPlayer::ReadyStateHaveMetadata);
- base::TimeDelta seek_time = ConvertSecondsToTimestamp(seconds);
+ base::TimeDelta new_seek_time = ConvertSecondsToTimestamp(seconds);
if (seeking_) {
+ if (new_seek_time == seek_time_) {
+ if (chunk_demuxer_) {
+ if (!pending_seek_) {
+ // If using media source demuxer, only suppress redundant seeks if
+ // there is no pending seek. This enforces that any pending seek that
+ // results in a demuxer seek is preceded by matching
+ // CancelPendingSeek() and StartWaitingForSeek() calls.
+ return;
+ }
+ } else {
+ // Suppress all redundant seeks if unrestricted by media source demuxer
+ // API.
+ pending_seek_ = false;
+ pending_seek_time_ = base::TimeDelta();
+ return;
+ }
+ }
+
pending_seek_ = true;
- pending_seek_seconds_ = seconds;
+ pending_seek_time_ = new_seek_time;
if (chunk_demuxer_)
- chunk_demuxer_->CancelPendingSeek(seek_time);
+ chunk_demuxer_->CancelPendingSeek(pending_seek_time_);
return;
}
media_log_->AddEvent(media_log_->CreateSeekEvent(seconds));
// Update our paused time.
- if (paused_)
- paused_time_ = seek_time;
+ // In paused state ignore the seek operations to current time if the loading
+ // is completed and generate OnPipelineBufferingStateChanged event to
+ // eventually fire seeking and seeked events
+ if (paused_) {
+ if (paused_time_ != new_seek_time) {
+ paused_time_ = new_seek_time;
+ } else if (old_state == ReadyStateHaveEnoughData) {
+ main_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&WebMediaPlayerImpl::OnPipelineBufferingStateChanged,
+ AsWeakPtr(), BUFFERING_HAVE_ENOUGH));
+ return;
+ }
+ }
seeking_ = true;
+ seek_time_ = new_seek_time;
if (chunk_demuxer_)
- chunk_demuxer_->StartWaitingForSeek(seek_time);
+ chunk_demuxer_->StartWaitingForSeek(seek_time_);
// Kick off the asynchronous seek!
- pipeline_.Seek(
- seek_time,
- BIND_TO_RENDER_LOOP1(&WebMediaPlayerImpl::OnPipelineSeeked, true));
+ pipeline_.Seek(seek_time_, BIND_TO_RENDER_LOOP1(
+ &WebMediaPlayerImpl::OnPipelineSeeked, true));
}
void WebMediaPlayerImpl::setRate(double rate) {
@@ -387,14 +382,14 @@ void WebMediaPlayerImpl::setVolume(double volume) {
pipeline_.SetVolume(volume);
}
-#define COMPILE_ASSERT_MATCHING_ENUM(webkit_name, chromium_name) \
- COMPILE_ASSERT(static_cast<int>(WebMediaPlayer::webkit_name) == \
- static_cast<int>(BufferedDataSource::chromium_name), \
- mismatching_enums)
-COMPILE_ASSERT_MATCHING_ENUM(PreloadNone, NONE);
-COMPILE_ASSERT_MATCHING_ENUM(PreloadMetaData, METADATA);
-COMPILE_ASSERT_MATCHING_ENUM(PreloadAuto, AUTO);
-#undef COMPILE_ASSERT_MATCHING_ENUM
+#define STATIC_ASSERT_MATCHING_ENUM(webkit_name, chromium_name) \
+ static_assert(static_cast<int>(WebMediaPlayer::webkit_name) == \
+ static_cast<int>(BufferedDataSource::chromium_name), \
+ "mismatching enum values: " #webkit_name)
+STATIC_ASSERT_MATCHING_ENUM(PreloadNone, NONE);
+STATIC_ASSERT_MATCHING_ENUM(PreloadMetaData, METADATA);
+STATIC_ASSERT_MATCHING_ENUM(PreloadAuto, AUTO);
+#undef STATIC_ASSERT_MATCHING_ENUM
void WebMediaPlayerImpl::setPreload(WebMediaPlayer::Preload preload) {
DVLOG(1) << __FUNCTION__ << "(" << preload << ")";
@@ -465,6 +460,14 @@ double WebMediaPlayerImpl::currentTime() const {
if (ended_)
return duration();
+ // We know the current seek time better than pipeline: pipeline may processing
+ // an earlier seek before a pending seek has been started, or it might not yet
+ // have the current seek time returnable via GetMediaTime().
+ if (seeking()) {
+ return pending_seek_ ? pending_seek_time_.InSecondsF()
+ : seek_time_.InSecondsF();
+ }
+
return (paused_ ? paused_time_ : pipeline_.GetMediaTime()).InSecondsF();
}
@@ -503,7 +506,7 @@ blink::WebTimeRanges WebMediaPlayerImpl::seekable() const {
// Allow a special exception for seeks to zero for streaming sources with a
// finite duration; this allows looping to work.
const bool allow_seek_to_zero = data_source_ && data_source_->IsStreaming() &&
- base::IsFinite(seekable_end);
+ std::isfinite(seekable_end);
// TODO(dalecurtis): Technically this allows seeking on media which return an
// infinite duration so long as DataSource::IsStreaming() is false. While not
@@ -536,13 +539,18 @@ void WebMediaPlayerImpl::paint(blink::WebCanvas* canvas,
GetCurrentFrameFromCompositor();
gfx::Rect gfx_rect(rect);
-
- skcanvas_video_renderer_.Paint(video_frame,
- canvas,
- gfx_rect,
- alpha,
- mode,
- pipeline_metadata_.video_rotation);
+ Context3D context_3d;
+ if (video_frame.get() &&
+ video_frame->format() == VideoFrame::NATIVE_TEXTURE) {
+ if (!context_3d_cb_.is_null()) {
+ context_3d = context_3d_cb_.Run();
+ }
+ // GPU Process crashed.
+ if (!context_3d.gl)
+ return;
+ }
+ skcanvas_video_renderer_.Paint(video_frame, canvas, gfx_rect, alpha, mode,
+ pipeline_metadata_.video_rotation, context_3d);
}
bool WebMediaPlayerImpl::hasSingleSecurityOrigin() const {
@@ -597,48 +605,36 @@ bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
unsigned int type,
bool premultiply_alpha,
bool flip_y) {
+ return copyVideoTextureToPlatformTexture(web_graphics_context, texture,
+ internal_format, type,
+ premultiply_alpha, flip_y);
+}
+
+bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
+ blink::WebGraphicsContext3D* web_graphics_context,
+ unsigned int texture,
+ unsigned int internal_format,
+ unsigned int type,
+ bool premultiply_alpha,
+ bool flip_y) {
TRACE_EVENT0("media", "WebMediaPlayerImpl:copyVideoTextureToPlatformTexture");
scoped_refptr<VideoFrame> video_frame =
GetCurrentFrameFromCompositor();
- if (!video_frame.get())
- return false;
- if (video_frame->format() != VideoFrame::NATIVE_TEXTURE)
- return false;
-
- const gpu::MailboxHolder* mailbox_holder = video_frame->mailbox_holder();
- if (mailbox_holder->texture_target != GL_TEXTURE_2D)
+ if (!video_frame.get() ||
+ video_frame->format() != VideoFrame::NATIVE_TEXTURE) {
return false;
+ }
- web_graphics_context->waitSyncPoint(mailbox_holder->sync_point);
- uint32 source_texture = web_graphics_context->createAndConsumeTextureCHROMIUM(
- GL_TEXTURE_2D, mailbox_holder->mailbox.name);
-
- // The video is stored in a unmultiplied format, so premultiply
- // if necessary.
- web_graphics_context->pixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
- premultiply_alpha);
- // Application itself needs to take care of setting the right flip_y
- // value down to get the expected result.
- // flip_y==true means to reverse the video orientation while
- // flip_y==false means to keep the intrinsic orientation.
- web_graphics_context->pixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
- web_graphics_context->copyTextureCHROMIUM(GL_TEXTURE_2D,
- source_texture,
- texture,
- level,
- internal_format,
- type);
- web_graphics_context->pixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, false);
- web_graphics_context->pixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
- false);
-
- web_graphics_context->deleteTexture(source_texture);
- web_graphics_context->flush();
-
- SyncPointClientImpl client(web_graphics_context);
- video_frame->UpdateReleaseSyncPoint(&client);
+ // TODO(dshwang): need more elegant way to convert WebGraphicsContext3D to
+ // GLES2Interface.
+ gpu::gles2::GLES2Interface* gl =
+ static_cast<gpu_blink::WebGraphicsContext3DImpl*>(web_graphics_context)
+ ->GetGLInterface();
+ SkCanvasVideoRenderer::CopyVideoFrameTextureToGLTexture(
+ gl, video_frame.get(), texture, internal_format, type, premultiply_alpha,
+ flip_y);
return true;
}
@@ -648,7 +644,7 @@ WebMediaPlayerImpl::generateKeyRequest(const WebString& key_system,
unsigned init_data_length) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- return encrypted_media_support_->GenerateKeyRequest(
+ return encrypted_media_support_.GenerateKeyRequest(
frame_, key_system, init_data, init_data_length);
}
@@ -661,7 +657,7 @@ WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::addKey(
const WebString& session_id) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- return encrypted_media_support_->AddKey(
+ return encrypted_media_support_.AddKey(
key_system, key, key_length, init_data, init_data_length, session_id);
}
@@ -670,22 +666,75 @@ WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::cancelKeyRequest(
const WebString& session_id) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- return encrypted_media_support_->CancelKeyRequest(key_system, session_id);
+ return encrypted_media_support_.CancelKeyRequest(key_system, session_id);
}
void WebMediaPlayerImpl::setContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm) {
+ blink::WebContentDecryptionModule* cdm,
+ blink::WebContentDecryptionModuleResult result) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- encrypted_media_support_->SetContentDecryptionModule(cdm);
+ // TODO(xhwang): Support setMediaKeys(0) if necessary: http://crbug.com/330324
+ if (!cdm) {
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ "Null MediaKeys object is not supported.");
+ return;
+ }
+
+ SetCdm(BIND_TO_RENDER_LOOP1(&WebMediaPlayerImpl::OnCdmAttached, result),
+ ToWebContentDecryptionModuleImpl(cdm)->GetCdmContext());
}
-void WebMediaPlayerImpl::setContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm,
- blink::WebContentDecryptionModuleResult result) {
- DCHECK(main_task_runner_->BelongsToCurrentThread());
+void WebMediaPlayerImpl::OnEncryptedMediaInitData(
+ EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data) {
+ DCHECK(init_data_type != EmeInitDataType::UNKNOWN);
+
+ // Do not fire "encrypted" event if encrypted media is not enabled.
+ // TODO(xhwang): Handle this in |client_|.
+ if (!blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled() &&
+ !blink::WebRuntimeFeatures::isEncryptedMediaEnabled()) {
+ return;
+ }
+
+ // TODO(xhwang): Update this UMA name.
+ UMA_HISTOGRAM_COUNTS("Media.EME.NeedKey", 1);
+
+ encrypted_media_support_.SetInitDataType(init_data_type);
+
+ client_->encrypted(ConvertToWebInitDataType(init_data_type),
+ vector_as_array(&init_data),
+ base::saturated_cast<unsigned int>(init_data.size()));
+}
+
+void WebMediaPlayerImpl::OnWaitingForDecryptionKey() {
+ client_->didBlockPlaybackWaitingForKey();
- encrypted_media_support_->SetContentDecryptionModule(cdm, result);
+ // TODO(jrummell): didResumePlaybackBlockedForKey() should only be called
+ // when a key has been successfully added (e.g. OnSessionKeysChange() with
+ // |has_additional_usable_key| = true). http://crbug.com/461903
+ client_->didResumePlaybackBlockedForKey();
+}
+
+void WebMediaPlayerImpl::SetCdm(const CdmAttachedCB& cdm_attached_cb,
+ CdmContext* cdm_context) {
+ // If CDM initialization succeeded, tell the pipeline about it.
+ if (cdm_context)
+ pipeline_.SetCdm(cdm_context, cdm_attached_cb);
+}
+
+void WebMediaPlayerImpl::OnCdmAttached(
+ blink::WebContentDecryptionModuleResult result,
+ bool success) {
+ if (success) {
+ result.complete();
+ return;
+ }
+
+ result.completeWithError(
+ blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
+ "Unable to set MediaKeys object");
}
void WebMediaPlayerImpl::OnPipelineSeeked(bool time_changed,
@@ -693,9 +742,12 @@ void WebMediaPlayerImpl::OnPipelineSeeked(bool time_changed,
DVLOG(1) << __FUNCTION__ << "(" << time_changed << ", " << status << ")";
DCHECK(main_task_runner_->BelongsToCurrentThread());
seeking_ = false;
+ seek_time_ = base::TimeDelta();
if (pending_seek_) {
+ double pending_seek_seconds = pending_seek_time_.InSecondsF();
pending_seek_ = false;
- seek(pending_seek_seconds_);
+ pending_seek_time_ = base::TimeDelta();
+ seek(pending_seek_seconds);
return;
}
@@ -706,7 +758,7 @@ void WebMediaPlayerImpl::OnPipelineSeeked(bool time_changed,
// Update our paused time.
if (paused_)
- paused_time_ = pipeline_.GetMediaTime();
+ UpdatePausedTime();
should_notify_time_changed_ = time_changed;
}
@@ -737,7 +789,7 @@ void WebMediaPlayerImpl::OnPipelineError(PipelineStatus error) {
SetNetworkState(PipelineErrorToNetworkState(error));
if (error == PIPELINE_ERROR_DECRYPT)
- encrypted_media_support_->OnPipelineDecryptError();
+ encrypted_media_support_.OnPipelineDecryptError();
}
void WebMediaPlayerImpl::OnPipelineMetadata(
@@ -746,8 +798,7 @@ void WebMediaPlayerImpl::OnPipelineMetadata(
pipeline_metadata_ = metadata;
- UMA_HISTOGRAM_ENUMERATION("Media.VideoRotation",
- metadata.video_rotation,
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoRotation", metadata.video_rotation,
VIDEO_ROTATION_MAX + 1);
SetReadyState(WebMediaPlayer::ReadyStateHaveMetadata);
@@ -781,6 +832,11 @@ void WebMediaPlayerImpl::OnPipelineBufferingStateChanged(
DCHECK_EQ(buffering_state, BUFFERING_HAVE_ENOUGH);
SetReadyState(WebMediaPlayer::ReadyStateHaveEnoughData);
+ // Let the DataSource know we have enough data. It may use this information to
+ // release unused network connections.
+ if (data_source_)
+ data_source_->OnBufferingHaveEnough();
+
// Blink expects a timeChanged() in response to a seek().
if (should_notify_time_changed_)
client_->timeChanged();
@@ -789,7 +845,7 @@ void WebMediaPlayerImpl::OnPipelineBufferingStateChanged(
void WebMediaPlayerImpl::OnDemuxerOpened() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
client_->mediaSourceOpened(new WebMediaSourceImpl(
- chunk_demuxer_, base::Bind(&LogMediaSourceError, media_log_)));
+ chunk_demuxer_, base::Bind(&MediaLog::AddLogEvent, media_log_)));
}
void WebMediaPlayerImpl::OnAddTextTrack(
@@ -807,8 +863,7 @@ void WebMediaPlayerImpl::OnAddTextTrack(
blink::WebString::fromUTF8(config.id());
scoped_ptr<WebInbandTextTrackImpl> web_inband_text_track(
- new WebInbandTextTrackImpl(web_kind, web_label, web_language, web_id,
- text_track_index_++));
+ new WebInbandTextTrackImpl(web_kind, web_label, web_language, web_id));
scoped_ptr<TextTrack> text_track(new TextTrackImpl(
main_task_runner_, client_, web_inband_text_track.Pass()));
@@ -838,52 +893,6 @@ void WebMediaPlayerImpl::NotifyDownloading(bool is_downloading) {
"is_downloading_data", is_downloading));
}
-// TODO(xhwang): Move this to a factory class so that we can create different
-// renderers.
-scoped_ptr<Renderer> WebMediaPlayerImpl::CreateRenderer() {
- SetDecryptorReadyCB set_decryptor_ready_cb =
- encrypted_media_support_->CreateSetDecryptorReadyCB();
-
- // Create our audio decoders and renderer.
- ScopedVector<AudioDecoder> audio_decoders;
-
- audio_decoders.push_back(new media::FFmpegAudioDecoder(
- media_task_runner_, base::Bind(&LogMediaSourceError, media_log_)));
- audio_decoders.push_back(new media::OpusAudioDecoder(media_task_runner_));
-
- scoped_ptr<AudioRenderer> audio_renderer(
- new AudioRendererImpl(media_task_runner_,
- audio_source_provider_.get(),
- audio_decoders.Pass(),
- set_decryptor_ready_cb,
- audio_hardware_config_,
- media_log_));
-
- // Create our video decoders and renderer.
- ScopedVector<VideoDecoder> video_decoders;
-
- if (gpu_factories_.get())
- video_decoders.push_back(new GpuVideoDecoder(gpu_factories_));
-
-#if !defined(MEDIA_DISABLE_LIBVPX)
- video_decoders.push_back(new VpxVideoDecoder(media_task_runner_));
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
-
- video_decoders.push_back(new FFmpegVideoDecoder(media_task_runner_));
-
- scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
- media_task_runner_,
- video_decoders.Pass(),
- set_decryptor_ready_cb,
- base::Bind(&WebMediaPlayerImpl::FrameReady, base::Unretained(this)),
- true,
- media_log_));
-
- // Create renderer.
- return scoped_ptr<Renderer>(new RendererImpl(
- media_task_runner_, audio_renderer.Pass(), video_renderer.Pass()));
-}
-
void WebMediaPlayerImpl::StartPipeline() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
@@ -892,48 +901,43 @@ void WebMediaPlayerImpl::StartPipeline() {
(load_type_ == LoadTypeMediaSource));
LogCB mse_log_cb;
- Demuxer::NeedKeyCB need_key_cb =
- encrypted_media_support_->CreateNeedKeyCB();
+ Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb =
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnEncryptedMediaInitData);
// Figure out which demuxer to use.
if (load_type_ != LoadTypeMediaSource) {
DCHECK(!chunk_demuxer_);
DCHECK(data_source_);
- demuxer_.reset(new FFmpegDemuxer(
- media_task_runner_, data_source_.get(),
- need_key_cb,
- media_log_));
+ demuxer_.reset(new FFmpegDemuxer(media_task_runner_, data_source_.get(),
+ encrypted_media_init_data_cb, media_log_));
} else {
DCHECK(!chunk_demuxer_);
DCHECK(!data_source_);
- mse_log_cb = base::Bind(&LogMediaSourceError, media_log_);
+ mse_log_cb = base::Bind(&MediaLog::AddLogEvent, media_log_);
chunk_demuxer_ = new ChunkDemuxer(
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDemuxerOpened),
- need_key_cb,
- mse_log_cb,
- true);
+ encrypted_media_init_data_cb, mse_log_cb, media_log_, true);
demuxer_.reset(chunk_demuxer_);
}
// ... and we're ready to go!
seeking_ = true;
- if (!renderer_)
- renderer_ = CreateRenderer();
-
pipeline_.Start(
demuxer_.get(),
- renderer_.Pass(),
+ renderer_factory_->CreateRenderer(
+ media_task_runner_, audio_source_provider_.get(), compositor_),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineEnded),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineError),
BIND_TO_RENDER_LOOP1(&WebMediaPlayerImpl::OnPipelineSeeked, false),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineMetadata),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineBufferingStateChanged),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDurationChanged),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnAddTextTrack));
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnAddTextTrack),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnWaitingForDecryptionKey));
}
void WebMediaPlayerImpl::SetNetworkState(WebMediaPlayer::NetworkState state) {
@@ -1001,21 +1005,12 @@ void WebMediaPlayerImpl::OnOpacityChanged(bool opaque) {
video_weblayer_->setOpaque(opaque_);
}
-void WebMediaPlayerImpl::FrameReady(
- const scoped_refptr<VideoFrame>& frame) {
- compositor_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoFrameCompositor::UpdateCurrentFrame,
- base::Unretained(compositor_),
- frame));
-}
-
static void GetCurrentFrameAndSignal(
VideoFrameCompositor* compositor,
scoped_refptr<VideoFrame>* video_frame_out,
base::WaitableEvent* event) {
TRACE_EVENT0("media", "GetCurrentFrameAndSignal");
- *video_frame_out = compositor->GetCurrentFrame();
+ *video_frame_out = compositor->GetCurrentFrameAndUpdateIfStale();
event->Signal();
}
@@ -1023,7 +1018,7 @@ scoped_refptr<VideoFrame>
WebMediaPlayerImpl::GetCurrentFrameFromCompositor() {
TRACE_EVENT0("media", "WebMediaPlayerImpl::GetCurrentFrameFromCompositor");
if (compositor_task_runner_->BelongsToCurrentThread())
- return compositor_->GetCurrentFrame();
+ return compositor_->GetCurrentFrameAndUpdateIfStale();
// Use a posted task and waitable event instead of a lock otherwise
// WebGL/Canvas can see different content than what the compositor is seeing.
@@ -1038,4 +1033,15 @@ WebMediaPlayerImpl::GetCurrentFrameFromCompositor() {
return video_frame;
}
+void WebMediaPlayerImpl::UpdatePausedTime() {
+ DCHECK(main_task_runner_->BelongsToCurrentThread());
+
+ // pause() may be called after playback has ended and the HTMLMediaElement
+ // requires that currentTime() == duration() after ending. We want to ensure
+ // |paused_time_| matches currentTime() in this case or a future seek() may
+ // incorrectly discard what it thinks is a seek to the existing time.
+ paused_time_ =
+ ended_ ? pipeline_.GetMediaDuration() : pipeline_.GetMediaTime();
+}
+
} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_impl.h b/chromium/media/blink/webmediaplayer_impl.h
index bec5747e5eb..486a68e2fef 100644
--- a/chromium/media/blink/webmediaplayer_impl.h
+++ b/chromium/media/blink/webmediaplayer_impl.h
@@ -14,23 +14,25 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
-#include "media/base/audio_renderer_sink.h"
+#include "media/base/cdm_factory.h"
#include "media/base/media_export.h"
#include "media/base/pipeline.h"
-#include "media/base/renderer.h"
+#include "media/base/renderer_factory.h"
#include "media/base/text_track.h"
#include "media/blink/buffered_data_source.h"
#include "media/blink/buffered_data_source_host_impl.h"
+#include "media/blink/encrypted_media_player_support.h"
+#include "media/blink/skcanvas_video_renderer.h"
#include "media/blink/video_frame_compositor.h"
-#include "media/filters/skcanvas_video_renderer.h"
+#include "media/blink/webmediaplayer_params.h"
#include "third_party/WebKit/public/platform/WebAudioSourceProvider.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
-#include "third_party/WebKit/public/platform/WebGraphicsContext3D.h"
#include "third_party/WebKit/public/platform/WebMediaPlayer.h"
#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
#include "url/gurl.h"
namespace blink {
+class WebGraphicsContext3D;
class WebLocalFrame;
}
@@ -46,13 +48,11 @@ namespace media {
class AudioHardwareConfig;
class ChunkDemuxer;
-class EncryptedMediaPlayerSupport;
class GpuVideoAcceleratorFactories;
class MediaLog;
class VideoFrameCompositor;
class WebAudioSourceProviderImpl;
class WebMediaPlayerDelegate;
-class WebMediaPlayerParams;
class WebTextTrackImpl;
// The canonical implementation of blink::WebMediaPlayer that's backed by
@@ -70,7 +70,8 @@ class MEDIA_EXPORT WebMediaPlayerImpl
WebMediaPlayerImpl(blink::WebLocalFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
- scoped_ptr<Renderer> renderer,
+ scoped_ptr<RendererFactory> renderer_factory,
+ CdmFactory* cdm_factory,
const WebMediaPlayerParams& params);
virtual ~WebMediaPlayerImpl();
@@ -127,6 +128,7 @@ class MEDIA_EXPORT WebMediaPlayerImpl
virtual unsigned audioDecodedByteCount() const;
virtual unsigned videoDecodedByteCount() const;
+ // TODO(dshwang): remove |level|. crbug.com/443151
virtual bool copyVideoTextureToPlatformTexture(
blink::WebGraphicsContext3D* web_graphics_context,
unsigned int texture,
@@ -135,6 +137,13 @@ class MEDIA_EXPORT WebMediaPlayerImpl
unsigned int type,
bool premultiply_alpha,
bool flip_y);
+ virtual bool copyVideoTextureToPlatformTexture(
+ blink::WebGraphicsContext3D* web_graphics_context,
+ unsigned int texture,
+ unsigned int internal_format,
+ unsigned int type,
+ bool premultiply_alpha,
+ bool flip_y);
virtual blink::WebAudioSourceProvider* audioSourceProvider();
@@ -154,10 +163,6 @@ class MEDIA_EXPORT WebMediaPlayerImpl
const blink::WebString& key_system,
const blink::WebString& session_id);
- // TODO(jrummell): Remove this method once Blink updated to use the other
- // method.
- virtual void setContentDecryptionModule(
- blink::WebContentDecryptionModule* cdm);
virtual void setContentDecryptionModule(
blink::WebContentDecryptionModule* cdm,
blink::WebContentDecryptionModuleResult result);
@@ -211,6 +216,26 @@ class MEDIA_EXPORT WebMediaPlayerImpl
// compositor can return the frame.
scoped_refptr<VideoFrame> GetCurrentFrameFromCompositor();
+ // Called when the demuxer encounters encrypted streams.
+ void OnEncryptedMediaInitData(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data);
+
+ // Called when a decoder detects that the key needed to decrypt the stream
+ // is not available.
+ void OnWaitingForDecryptionKey();
+
+ // Sets |cdm_context| on the pipeline and fires |cdm_attached_cb| when done.
+ // Parameter order is reversed for easy binding.
+ void SetCdm(const CdmAttachedCB& cdm_attached_cb, CdmContext* cdm_context);
+
+ // Called when a CDM has been attached to the |pipeline_|.
+ void OnCdmAttached(blink::WebContentDecryptionModuleResult result,
+ bool success);
+
+ // Updates |paused_time_| to the current media time with consideration for the
+ // |ended_| state by clamping current time to duration upon |ended_|.
+ void UpdatePausedTime();
+
blink::WebLocalFrame* frame_;
// TODO(hclam): get rid of these members and read from the pipeline directly.
@@ -249,10 +274,11 @@ class MEDIA_EXPORT WebMediaPlayerImpl
// time we pause and then return that value in currentTime(). Otherwise our
// clock can creep forward a little bit while the asynchronous
// SetPlaybackRate(0) is being executed.
- bool paused_;
- bool seeking_;
double playback_rate_;
+ bool paused_;
base::TimeDelta paused_time_;
+ bool seeking_;
+ base::TimeDelta seek_time_; // Meaningless when |seeking_| is false.
// TODO(scherkus): Replace with an explicit ended signal to HTMLMediaElement,
// see http://crbug.com/409280
@@ -261,7 +287,8 @@ class MEDIA_EXPORT WebMediaPlayerImpl
// Seek gets pending if another seek is in progress. Only last pending seek
// will have effect.
bool pending_seek_;
- double pending_seek_seconds_;
+ // |pending_seek_time_| is meaningless when |pending_seek_| is false.
+ base::TimeDelta pending_seek_time_;
// Tracks whether to issue time changed notifications during buffering state
// changes.
@@ -271,10 +298,8 @@ class MEDIA_EXPORT WebMediaPlayerImpl
base::WeakPtr<WebMediaPlayerDelegate> delegate_;
- base::Callback<void(const base::Closure&)> defer_load_cb_;
-
- // Factories for supporting video accelerators. May be null.
- scoped_refptr<GpuVideoAcceleratorFactories> gpu_factories_;
+ WebMediaPlayerParams::DeferLoadCB defer_load_cb_;
+ WebMediaPlayerParams::Context3DCB context_3d_cb_;
// Routes audio playback to either AudioRendererSink or WebAudio.
scoped_refptr<WebAudioSourceProviderImpl> audio_source_provider_;
@@ -302,14 +327,9 @@ class MEDIA_EXPORT WebMediaPlayerImpl
// playback.
scoped_ptr<cc_blink::WebLayerImpl> video_weblayer_;
- // Text track objects get a unique index value when they're created.
- int text_track_index_;
-
- scoped_ptr<EncryptedMediaPlayerSupport> encrypted_media_support_;
-
- const AudioHardwareConfig& audio_hardware_config_;
+ EncryptedMediaPlayerSupport encrypted_media_support_;
- scoped_ptr<Renderer> renderer_;
+ scoped_ptr<RendererFactory> renderer_factory_;
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerImpl);
};
diff --git a/chromium/media/blink/webmediaplayer_params.cc b/chromium/media/blink/webmediaplayer_params.cc
index 1fd2b8af80f..ec602a227c9 100644
--- a/chromium/media/blink/webmediaplayer_params.cc
+++ b/chromium/media/blink/webmediaplayer_params.cc
@@ -7,42 +7,28 @@
#include "base/single_thread_task_runner.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_log.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
namespace media {
WebMediaPlayerParams::WebMediaPlayerParams(
- const base::Callback<void(const base::Closure&)>& defer_load_cb,
+ const DeferLoadCB& defer_load_cb,
const scoped_refptr<AudioRendererSink>& audio_renderer_sink,
- const AudioHardwareConfig& audio_hardware_config,
const scoped_refptr<MediaLog>& media_log,
- const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
- const EncryptedMediaPlayerSupportCreateCB&
- encrypted_media_player_support_cb,
+ const Context3DCB& context_3d_cb,
+ MediaPermission* media_permission,
blink::WebContentDecryptionModule* initial_cdm)
: defer_load_cb_(defer_load_cb),
audio_renderer_sink_(audio_renderer_sink),
- audio_hardware_config_(audio_hardware_config),
media_log_(media_log),
- gpu_factories_(gpu_factories),
media_task_runner_(media_task_runner),
compositor_task_runner_(compositor_task_runner),
- encrypted_media_player_support_cb_(encrypted_media_player_support_cb),
+ context_3d_cb_(context_3d_cb),
+ media_permission_(media_permission),
initial_cdm_(initial_cdm) {
}
WebMediaPlayerParams::~WebMediaPlayerParams() {}
-scoped_ptr<EncryptedMediaPlayerSupport>
-WebMediaPlayerParams::CreateEncryptedMediaPlayerSupport(
- blink::WebMediaPlayerClient* client) const {
- scoped_ptr<EncryptedMediaPlayerSupport> encrypted_media_support =
- encrypted_media_player_support_cb_.Run(client);
- if (encrypted_media_support)
- encrypted_media_support->SetInitialContentDecryptionModule(initial_cdm_);
- return encrypted_media_support.Pass();
-}
-
} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_params.h b/chromium/media/blink/webmediaplayer_params.h
index c765548d4b0..02ca00789e6 100644
--- a/chromium/media/blink/webmediaplayer_params.h
+++ b/chromium/media/blink/webmediaplayer_params.h
@@ -8,91 +8,82 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "media/base/media_export.h"
-#include "media/blink/encrypted_media_player_support.h"
+#include "media/filters/context_3d.h"
namespace base {
class SingleThreadTaskRunner;
}
namespace blink {
+class WebContentDecryptionModule;
class WebMediaPlayerClient;
}
namespace media {
-class AudioHardwareConfig;
+
class AudioRendererSink;
-class GpuVideoAcceleratorFactories;
class MediaLog;
+class MediaPermission;
// Holds parameters for constructing WebMediaPlayerImpl without having
// to plumb arguments through various abstraction layers.
class MEDIA_EXPORT WebMediaPlayerParams {
public:
- // Callback used to create EncryptedMediaPlayerSupport instances. This
- // callback must always return a valid EncryptedMediaPlayerSupport object.
- typedef base::Callback<scoped_ptr<EncryptedMediaPlayerSupport>(
- blink::WebMediaPlayerClient*)> EncryptedMediaPlayerSupportCreateCB;
typedef base::Callback<void(const base::Closure&)> DeferLoadCB;
+ typedef base::Callback<Context3D()> Context3DCB;
- // |defer_load_cb|, |audio_renderer_sink|, and |compositor_task_runner| may be
- // null.
+ // |defer_load_cb|, |audio_renderer_sink|, |compositor_task_runner|, and
+ // |context_3d_cb| may be null.
WebMediaPlayerParams(
const DeferLoadCB& defer_load_cb,
const scoped_refptr<AudioRendererSink>& audio_renderer_sink,
- const AudioHardwareConfig& audio_hardware_config,
const scoped_refptr<MediaLog>& media_log,
- const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
- const EncryptedMediaPlayerSupportCreateCB&
- encrypted_media_player_support_cb,
+ const Context3DCB& context_3d,
+ MediaPermission* media_permission,
blink::WebContentDecryptionModule* initial_cdm);
~WebMediaPlayerParams();
- base::Callback<void(const base::Closure&)> defer_load_cb() const {
- return defer_load_cb_;
- }
+ DeferLoadCB defer_load_cb() const { return defer_load_cb_; }
const scoped_refptr<AudioRendererSink>& audio_renderer_sink() const {
return audio_renderer_sink_;
}
- const AudioHardwareConfig& audio_hardware_config() const {
- return audio_hardware_config_;
- }
-
const scoped_refptr<MediaLog>& media_log() const {
return media_log_;
}
- const scoped_refptr<GpuVideoAcceleratorFactories>&
- gpu_factories() const {
- return gpu_factories_;
- }
-
- const scoped_refptr<base::SingleThreadTaskRunner>&
- media_task_runner() const {
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner() const {
return media_task_runner_;
}
- const scoped_refptr<base::SingleThreadTaskRunner>&
- compositor_task_runner() const {
+ const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner()
+ const {
return compositor_task_runner_;
}
- scoped_ptr<EncryptedMediaPlayerSupport>
- CreateEncryptedMediaPlayerSupport(blink::WebMediaPlayerClient* client) const;
+ Context3DCB context_3d_cb() const { return context_3d_cb_; }
+
+ MediaPermission* media_permission() const { return media_permission_; }
+
+ blink::WebContentDecryptionModule* initial_cdm() const {
+ return initial_cdm_;
+ }
+
private:
- base::Callback<void(const base::Closure&)> defer_load_cb_;
+ DeferLoadCB defer_load_cb_;
scoped_refptr<AudioRendererSink> audio_renderer_sink_;
- const AudioHardwareConfig& audio_hardware_config_;
scoped_refptr<MediaLog> media_log_;
- scoped_refptr<GpuVideoAcceleratorFactories> gpu_factories_;
scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner_;
- EncryptedMediaPlayerSupportCreateCB encrypted_media_player_support_cb_;
+ Context3DCB context_3d_cb_;
+
+ // TODO(xhwang): Remove after prefixed EME API support is removed.
+ MediaPermission* media_permission_;
blink::WebContentDecryptionModule* initial_cdm_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMediaPlayerParams);
diff --git a/chromium/media/blink/webmediaplayer_util.cc b/chromium/media/blink/webmediaplayer_util.cc
index bcaf13567aa..c5764349645 100644
--- a/chromium/media/blink/webmediaplayer_util.cc
+++ b/chromium/media/blink/webmediaplayer_util.cc
@@ -14,14 +14,14 @@ namespace media {
// Compile asserts shared by all platforms.
-#define COMPILE_ASSERT_MATCHING_ENUM(name) \
- COMPILE_ASSERT( \
+#define STATIC_ASSERT_MATCHING_ENUM(name) \
+ static_assert( \
static_cast<int>(blink::WebMediaPlayerClient::MediaKeyErrorCode ## name) == \
static_cast<int>(MediaKeys::k ## name ## Error), \
- mismatching_enums)
-COMPILE_ASSERT_MATCHING_ENUM(Unknown);
-COMPILE_ASSERT_MATCHING_ENUM(Client);
-#undef COMPILE_ASSERT_MATCHING_ENUM
+ "mismatching enum values: " #name)
+STATIC_ASSERT_MATCHING_ENUM(Unknown);
+STATIC_ASSERT_MATCHING_ENUM(Client);
+#undef STATIC_ASSERT_MATCHING_ENUM
base::TimeDelta ConvertSecondsToTimestamp(double seconds) {
double microseconds = seconds * base::Time::kMicrosecondsPerSecond;
@@ -117,4 +117,38 @@ void ReportMediaSchemeUma(const GURL& url) {
kMaxURLScheme + 1);
}
+EmeInitDataType ConvertToEmeInitDataType(
+ blink::WebEncryptedMediaInitDataType init_data_type) {
+ switch (init_data_type) {
+ case blink::WebEncryptedMediaInitDataType::Webm:
+ return EmeInitDataType::WEBM;
+ case blink::WebEncryptedMediaInitDataType::Cenc:
+ return EmeInitDataType::CENC;
+ case blink::WebEncryptedMediaInitDataType::Keyids:
+ return EmeInitDataType::KEYIDS;
+ case blink::WebEncryptedMediaInitDataType::Unknown:
+ return EmeInitDataType::UNKNOWN;
+ }
+
+ NOTREACHED();
+ return EmeInitDataType::UNKNOWN;
+}
+
+blink::WebEncryptedMediaInitDataType ConvertToWebInitDataType(
+ EmeInitDataType init_data_type) {
+ switch (init_data_type) {
+ case EmeInitDataType::WEBM:
+ return blink::WebEncryptedMediaInitDataType::Webm;
+ case EmeInitDataType::CENC:
+ return blink::WebEncryptedMediaInitDataType::Cenc;
+ case EmeInitDataType::KEYIDS:
+ return blink::WebEncryptedMediaInitDataType::Keyids;
+ case EmeInitDataType::UNKNOWN:
+ return blink::WebEncryptedMediaInitDataType::Unknown;
+ }
+
+ NOTREACHED();
+ return blink::WebEncryptedMediaInitDataType::Unknown;
+}
+
} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_util.h b/chromium/media/blink/webmediaplayer_util.h
index 159be7714ce..005fbb958a7 100644
--- a/chromium/media/blink/webmediaplayer_util.h
+++ b/chromium/media/blink/webmediaplayer_util.h
@@ -6,9 +6,11 @@
#define MEDIA_BLINK_WEBMEDIAPLAYER_UTIL_H_
#include "base/time/time.h"
+#include "media/base/eme_constants.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
#include "media/base/ranges.h"
+#include "third_party/WebKit/public/platform/WebEncryptedMediaTypes.h"
#include "third_party/WebKit/public/platform/WebMediaPlayer.h"
#include "third_party/WebKit/public/platform/WebTimeRange.h"
#include "url/gurl.h"
@@ -30,6 +32,12 @@ blink::WebMediaPlayer::NetworkState MEDIA_EXPORT PipelineErrorToNetworkState(
// Report the scheme of Media URIs.
void MEDIA_EXPORT ReportMediaSchemeUma(const GURL& url);
+// Convert Initialization Data Types.
+EmeInitDataType MEDIA_EXPORT
+ConvertToEmeInitDataType(blink::WebEncryptedMediaInitDataType init_data_type);
+blink::WebEncryptedMediaInitDataType MEDIA_EXPORT
+ConvertToWebInitDataType(EmeInitDataType init_data_type);
+
} // namespace media
#endif // MEDIA_BLINK_WEBMEDIAPLAYER_UTIL_H_
diff --git a/chromium/media/blink/webmediasource_impl.cc b/chromium/media/blink/webmediasource_impl.cc
index aecad6f307f..d67ca6d7d7f 100644
--- a/chromium/media/blink/webmediasource_impl.cc
+++ b/chromium/media/blink/webmediasource_impl.cc
@@ -15,14 +15,14 @@ using ::blink::WebMediaSource;
namespace media {
-#define COMPILE_ASSERT_MATCHING_STATUS_ENUM(webkit_name, chromium_name) \
- COMPILE_ASSERT(static_cast<int>(WebMediaSource::webkit_name) == \
- static_cast<int>(ChunkDemuxer::chromium_name), \
- mismatching_status_enums)
-COMPILE_ASSERT_MATCHING_STATUS_ENUM(AddStatusOk, kOk);
-COMPILE_ASSERT_MATCHING_STATUS_ENUM(AddStatusNotSupported, kNotSupported);
-COMPILE_ASSERT_MATCHING_STATUS_ENUM(AddStatusReachedIdLimit, kReachedIdLimit);
-#undef COMPILE_ASSERT_MATCHING_STATUS_ENUM
+#define STATIC_ASSERT_MATCHING_STATUS_ENUM(webkit_name, chromium_name) \
+ static_assert(static_cast<int>(WebMediaSource::webkit_name) == \
+ static_cast<int>(ChunkDemuxer::chromium_name), \
+ "mismatching status enum values: " #webkit_name)
+STATIC_ASSERT_MATCHING_STATUS_ENUM(AddStatusOk, kOk);
+STATIC_ASSERT_MATCHING_STATUS_ENUM(AddStatusNotSupported, kNotSupported);
+STATIC_ASSERT_MATCHING_STATUS_ENUM(AddStatusReachedIdLimit, kReachedIdLimit);
+#undef STATIC_ASSERT_MATCHING_STATUS_ENUM
WebMediaSourceImpl::WebMediaSourceImpl(
ChunkDemuxer* demuxer, LogCB log_cb)
diff --git a/chromium/media/blink/websourcebuffer_impl.cc b/chromium/media/blink/websourcebuffer_impl.cc
index 3491378f2d9..0e387785ae1 100644
--- a/chromium/media/blink/websourcebuffer_impl.cc
+++ b/chromium/media/blink/websourcebuffer_impl.cc
@@ -4,19 +4,19 @@
#include "media/blink/websourcebuffer_impl.h"
+#include <cmath>
#include <limits>
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
-#include "base/float_util.h"
#include "media/filters/chunk_demuxer.h"
#include "third_party/WebKit/public/platform/WebSourceBufferClient.h"
namespace media {
static base::TimeDelta DoubleToTimeDelta(double time) {
- DCHECK(!base::IsNaN(time));
+ DCHECK(!std::isnan(time));
DCHECK_NE(time, -std::numeric_limits<double>::infinity());
if (time == std::numeric_limits<double>::infinity())
diff --git a/chromium/media/cast/BUILD.gn b/chromium/media/cast/BUILD.gn
index 15d76f85eed..b6df09f4e87 100644
--- a/chromium/media/cast/BUILD.gn
+++ b/chromium/media/cast/BUILD.gn
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//testing/test.gni")
+
component("cast") {
deps = [
":sender",
@@ -52,6 +54,7 @@ source_set("common") {
"//base",
"//crypto",
"//net",
+ "//third_party/zlib",
]
}
@@ -66,34 +69,34 @@ source_set("net") {
"net/pacing/paced_sender.cc",
"net/pacing/paced_sender.h",
"net/rtcp/receiver_rtcp_event_subscriber.cc",
- "net/rtcp/rtcp_defines.cc",
- "net/rtcp/rtcp_defines.h",
- "net/rtcp/rtcp.h",
"net/rtcp/rtcp.cc",
+ "net/rtcp/rtcp.h",
"net/rtcp/rtcp_builder.cc",
"net/rtcp/rtcp_builder.h",
+ "net/rtcp/rtcp_defines.cc",
+ "net/rtcp/rtcp_defines.h",
"net/rtcp/rtcp_utility.cc",
"net/rtcp/rtcp_utility.h",
- "net/rtp/packet_storage.cc",
- "net/rtp/packet_storage.h",
- "net/rtp/rtp_packetizer.cc",
- "net/rtp/rtp_packetizer.h",
- "net/rtp/rtp_sender.cc",
- "net/rtp/rtp_sender.h",
- "net/udp_transport.cc",
- "net/udp_transport.h",
"net/rtp/cast_message_builder.cc",
"net/rtp/cast_message_builder.h",
"net/rtp/frame_buffer.cc",
"net/rtp/frame_buffer.h",
"net/rtp/framer.cc",
"net/rtp/framer.h",
+ "net/rtp/packet_storage.cc",
+ "net/rtp/packet_storage.h",
"net/rtp/receiver_stats.cc",
"net/rtp/receiver_stats.h",
+ "net/rtp/rtp_packetizer.cc",
+ "net/rtp/rtp_packetizer.h",
"net/rtp/rtp_parser.cc",
"net/rtp/rtp_parser.h",
"net/rtp/rtp_receiver_defines.cc",
"net/rtp/rtp_receiver_defines.h",
+ "net/rtp/rtp_sender.cc",
+ "net/rtp/rtp_sender.h",
+ "net/udp_transport.cc",
+ "net/udp_transport.h",
]
deps = [
@@ -106,24 +109,28 @@ source_set("sender") {
"cast_sender.h",
"cast_sender_impl.cc",
"cast_sender_impl.h",
- "sender/audio_encoder.h",
"sender/audio_encoder.cc",
- "sender/audio_sender.h",
+ "sender/audio_encoder.h",
"sender/audio_sender.cc",
- "sender/congestion_control.h",
+ "sender/audio_sender.h",
"sender/congestion_control.cc",
- "sender/external_video_encoder.h",
+ "sender/congestion_control.h",
"sender/external_video_encoder.cc",
- "sender/fake_software_video_encoder.h",
+ "sender/external_video_encoder.h",
"sender/fake_software_video_encoder.cc",
+ "sender/fake_software_video_encoder.h",
"sender/frame_sender.cc",
"sender/frame_sender.h",
+ "sender/size_adaptable_video_encoder_base.cc",
+ "sender/size_adaptable_video_encoder_base.h",
"sender/software_video_encoder.h",
+ "sender/video_encoder.cc",
"sender/video_encoder.h",
- "sender/video_encoder_impl.h",
"sender/video_encoder_impl.cc",
- "sender/video_sender.h",
+ "sender/video_encoder_impl.h",
+ "sender/video_frame_factory.h",
"sender/video_sender.cc",
+ "sender/video_sender.h",
"sender/vp8_encoder.cc",
"sender/vp8_encoder.h",
]
@@ -135,6 +142,7 @@ source_set("sender") {
"//media:shared_memory_support",
"//third_party/libvpx",
"//third_party/opus",
+ "//ui/gfx/geometry",
]
# use a restricted subset of media and no software codecs on iOS
@@ -146,14 +154,35 @@ source_set("sender") {
"//third_party/opus",
]
sources -= [
- "sender/external_video_encoder.h",
"sender/external_video_encoder.cc",
- "sender/video_encoder_impl.h",
+ "sender/external_video_encoder.h",
"sender/video_encoder_impl.cc",
+ "sender/video_encoder_impl.h",
"sender/vp8_encoder.cc",
"sender/vp8_encoder.h",
]
}
+
+ libs = []
+
+ # iOS and OS X encoders
+ if (is_ios || is_mac) {
+ sources += [
+ "sender/h264_vt_encoder.cc",
+ "sender/h264_vt_encoder.h",
+ ]
+
+ libs += [
+ "CoreVideo.framework",
+ ]
+ }
+
+ if (is_mac) {
+ # Required by audio_encoder.cc.
+ libs += [
+ "AudioToolbox.framework",
+ ]
+ }
}
source_set("receiver") {
@@ -190,20 +219,20 @@ source_set("receiver") {
}
}
-source_set("test_support") {
+static_library("test_support") {
testonly = true
sources = [
-# TODO(hclam): FFmpeg.
-# "test/fake_media_source.cc",
-# "test/fake_media_source.h",
+ # TODO(hclam): FFmpeg.
+ # "test/fake_media_source.cc",
+ # "test/fake_media_source.h",
"test/fake_single_thread_task_runner.cc",
"test/fake_single_thread_task_runner.h",
+ "test/loopback_transport.cc",
+ "test/loopback_transport.h",
"test/skewed_single_thread_task_runner.cc",
"test/skewed_single_thread_task_runner.h",
"test/skewed_tick_clock.cc",
"test/skewed_tick_clock.h",
- "test/loopback_transport.cc",
- "test/loopback_transport.h",
"test/utility/audio_utility.cc",
"test/utility/audio_utility.h",
"test/utility/barcode.cc",
@@ -218,10 +247,10 @@ source_set("test_support") {
"test/utility/net_utility.h",
"test/utility/standalone_cast_environment.cc",
"test/utility/standalone_cast_environment.h",
- "test/utility/video_utility.cc",
- "test/utility/video_utility.h",
"test/utility/udp_proxy.cc",
"test/utility/udp_proxy.h",
+ "test/utility/video_utility.cc",
+ "test/utility/video_utility.h",
]
deps = [
@@ -229,8 +258,9 @@ source_set("test_support") {
":receiver",
"//base/test:test_support",
"//testing/gtest",
-# TODO(hclam): Does not build on ARM yet.
-# "//third_party/ffmpeg",
+
+ # TODO(hclam): Does not build on ARM yet.
+ # "//third_party/ffmpeg",
"//third_party/libyuv",
"//third_party/mt19937ar",
"//ui/gfx",
@@ -241,21 +271,24 @@ source_set("test_support") {
test("cast_unittests") {
sources = [
"logging/encoding_event_subscriber_unittest.cc",
- "logging/serialize_deserialize_test.cc",
"logging/logging_impl_unittest.cc",
"logging/logging_raw_unittest.cc",
"logging/receiver_time_offset_estimator_impl_unittest.cc",
+ "logging/serialize_deserialize_test.cc",
"logging/simple_event_subscriber_unittest.cc",
"logging/stats_event_subscriber_unittest.cc",
"net/cast_transport_sender_impl_unittest.cc",
+ "net/mock_cast_transport_sender.cc",
+ "net/mock_cast_transport_sender.h",
"net/pacing/mock_paced_packet_sender.cc",
"net/pacing/mock_paced_packet_sender.h",
"net/pacing/paced_sender_unittest.cc",
+ "net/rtcp/receiver_rtcp_event_subscriber_unittest.cc",
"net/rtcp/rtcp_builder_unittest.cc",
"net/rtcp/rtcp_unittest.cc",
"net/rtcp/rtcp_utility_unittest.cc",
- "net/rtcp/receiver_rtcp_event_subscriber_unittest.cc",
-# TODO(miu): The following two are test utility modules. Rename/move the files.
+
+ # TODO(miu): The following two are test utility modules. Rename/move the files.
"net/rtcp/test_rtcp_packet_builder.cc",
"net/rtcp/test_rtcp_packet_builder.h",
"net/rtp/cast_message_builder_unittest.cc",
@@ -268,8 +301,8 @@ test("cast_unittests") {
"net/rtp/rtp_header_parser.cc",
"net/rtp/rtp_header_parser.h",
"net/rtp/rtp_packet_builder.cc",
- "net/rtp/rtp_parser_unittest.cc",
"net/rtp/rtp_packetizer_unittest.cc",
+ "net/rtp/rtp_parser_unittest.cc",
"net/rtp/rtp_receiver_defines.h",
"net/udp_transport_unittest.cc",
"receiver/audio_decoder_unittest.cc",
@@ -278,14 +311,13 @@ test("cast_unittests") {
"sender/audio_encoder_unittest.cc",
"sender/audio_sender_unittest.cc",
"sender/congestion_control_unittest.cc",
- "sender/external_video_encoder_unittest.cc",
- "sender/video_encoder_impl_unittest.cc",
+ "sender/fake_video_encode_accelerator_factory.cc",
+ "sender/fake_video_encode_accelerator_factory.h",
+ "sender/video_encoder_unittest.cc",
"sender/video_sender_unittest.cc",
"test/end2end_unittest.cc",
"test/fake_receiver_time_offset_estimator.cc",
"test/fake_receiver_time_offset_estimator.h",
- "test/fake_video_encode_accelerator.cc",
- "test/fake_video_encode_accelerator.h",
"test/utility/audio_utility_unittest.cc",
"test/utility/barcode_unittest.cc",
]
@@ -302,6 +334,23 @@ test("cast_unittests") {
]
}
+if (is_ios || is_mac) {
+ test("cast_h264_vt_encoder_unittests") {
+ sources = [
+ "sender/h264_vt_encoder_unittest.cc",
+ ]
+ deps = [
+ ":common",
+ ":sender",
+ ":test_support",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/ffmpeg",
+ ]
+ }
+}
+
executable("generate_barcode_video") {
testonly = true
sources = [
@@ -340,3 +389,39 @@ executable("udp_proxy") {
"//net",
]
}
+
+if (is_linux && !is_chromeos) {
+ # TODO(GYP): Figure out which of these work and are needed on other platforms.
+ test("cast_benchmarks") {
+ sources = [
+ "test/cast_benchmarks.cc",
+ "test/fake_single_thread_task_runner.cc",
+ "test/fake_single_thread_task_runner.h",
+ "test/utility/test_util.cc",
+ "test/utility/test_util.h",
+ ]
+
+ deps = [
+ ":common",
+ ":net",
+ ":receiver",
+ ":sender",
+ ":test_support",
+ "//base/test:test_support",
+ "//net",
+ "//testing/gtest",
+ ]
+ }
+
+ test("tap_proxy") {
+ sources = [
+ "test/utility/tap_proxy.cc",
+ ]
+
+ deps = [
+ ":test_support",
+ "//base",
+ "//media",
+ ]
+ }
+}
diff --git a/chromium/media/cast/OWNERS b/chromium/media/cast/OWNERS
index f8e61c33ccc..586a2f5405e 100644
--- a/chromium/media/cast/OWNERS
+++ b/chromium/media/cast/OWNERS
@@ -1,5 +1,2 @@
-hclam@chromium.org
hubbe@chromium.org
-mikhal@chromium.org
miu@chromium.org
-pwestin@google.com
diff --git a/chromium/media/cast/cast.gyp b/chromium/media/cast/cast.gyp
index f0d4b79a279..ef26fac1ec0 100644
--- a/chromium/media/cast/cast.gyp
+++ b/chromium/media/cast/cast.gyp
@@ -99,14 +99,6 @@
],
'sources': [
'cast_receiver.h',
- 'receiver/audio_decoder.cc',
- 'receiver/audio_decoder.h',
- 'receiver/cast_receiver_impl.cc',
- 'receiver/cast_receiver_impl.h',
- 'receiver/frame_receiver.cc',
- 'receiver/frame_receiver.h',
- 'receiver/video_decoder.cc',
- 'receiver/video_decoder.h',
'net/rtp/cast_message_builder.cc',
'net/rtp/cast_message_builder.h',
'net/rtp/frame_buffer.cc',
@@ -115,10 +107,16 @@
'net/rtp/framer.h',
'net/rtp/receiver_stats.cc',
'net/rtp/receiver_stats.h',
- 'net/rtp/rtp_parser.cc',
- 'net/rtp/rtp_parser.h',
'net/rtp/rtp_receiver_defines.cc',
'net/rtp/rtp_receiver_defines.h',
+ 'receiver/audio_decoder.cc',
+ 'receiver/audio_decoder.h',
+ 'receiver/cast_receiver_impl.cc',
+ 'receiver/cast_receiver_impl.h',
+ 'receiver/frame_receiver.cc',
+ 'receiver/frame_receiver.h',
+ 'receiver/video_decoder.cc',
+ 'receiver/video_decoder.h',
], # source
'conditions': [
# use a restricted subset of media and no software codecs on iOS
@@ -146,29 +144,34 @@
'<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
], # dependencies
'sources': [
'cast_sender.h',
'cast_sender_impl.cc',
'cast_sender_impl.h',
- 'sender/audio_encoder.h',
'sender/audio_encoder.cc',
- 'sender/audio_sender.h',
+ 'sender/audio_encoder.h',
'sender/audio_sender.cc',
- 'sender/congestion_control.h',
+ 'sender/audio_sender.h',
'sender/congestion_control.cc',
- 'sender/external_video_encoder.h',
+ 'sender/congestion_control.h',
'sender/external_video_encoder.cc',
- 'sender/fake_software_video_encoder.h',
+ 'sender/external_video_encoder.h',
'sender/fake_software_video_encoder.cc',
+ 'sender/fake_software_video_encoder.h',
'sender/frame_sender.cc',
'sender/frame_sender.h',
+ 'sender/size_adaptable_video_encoder_base.cc',
+ 'sender/size_adaptable_video_encoder_base.h',
'sender/software_video_encoder.h',
+ 'sender/video_encoder.cc',
'sender/video_encoder.h',
- 'sender/video_encoder_impl.h',
'sender/video_encoder_impl.cc',
- 'sender/video_sender.h',
+ 'sender/video_encoder_impl.h',
+ 'sender/video_frame_factory.h',
'sender/video_sender.cc',
+ 'sender/video_sender.h',
'sender/vp8_encoder.cc',
'sender/vp8_encoder.h',
], # source
@@ -182,14 +185,33 @@
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
],
'sources!': [
- 'sender/external_video_encoder.h',
'sender/external_video_encoder.cc',
- 'sender/video_encoder_impl.h',
+ 'sender/external_video_encoder.h',
'sender/video_encoder_impl.cc',
+ 'sender/video_encoder_impl.h',
'sender/vp8_encoder.cc',
'sender/vp8_encoder.h',
],
}], # OS=="ios"
+ # iOS and OS X encoders
+ ['OS=="ios" or OS=="mac"', {
+ 'sources': [
+ 'sender/h264_vt_encoder.cc',
+ 'sender/h264_vt_encoder.h',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
+ ],
+ },
+ }], # OS=="ios" or OS=="mac"
+ ['OS=="mac"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
+ ],
+ },
+ }], # OS=="mac"
], # conditions
},
{
@@ -214,18 +236,20 @@
'net/pacing/paced_sender.cc',
'net/pacing/paced_sender.h',
'net/rtcp/receiver_rtcp_event_subscriber.cc',
+ 'net/rtcp/rtcp.cc',
+ 'net/rtcp/rtcp.h',
'net/rtcp/rtcp_builder.cc',
'net/rtcp/rtcp_builder.h',
'net/rtcp/rtcp_defines.cc',
'net/rtcp/rtcp_defines.h',
- 'net/rtcp/rtcp.h',
- 'net/rtcp/rtcp.cc',
'net/rtcp/rtcp_utility.cc',
'net/rtcp/rtcp_utility.h',
'net/rtp/packet_storage.cc',
'net/rtp/packet_storage.h',
'net/rtp/rtp_packetizer.cc',
'net/rtp/rtp_packetizer.h',
+ 'net/rtp/rtp_parser.cc',
+ 'net/rtp/rtp_parser.h',
'net/rtp/rtp_sender.cc',
'net/rtp/rtp_sender.h',
'net/udp_transport.cc',
diff --git a/chromium/media/cast/cast_config.cc b/chromium/media/cast/cast_config.cc
index 56b20f60633..e361f158c49 100644
--- a/chromium/media/cast/cast_config.cc
+++ b/chromium/media/cast/cast_config.cc
@@ -23,14 +23,11 @@ namespace cast {
VideoSenderConfig::VideoSenderConfig()
: ssrc(0),
- incoming_feedback_ssrc(0),
- rtcp_interval(kDefaultRtcpIntervalMs),
+ receiver_ssrc(0),
max_playout_delay(
base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs)),
rtp_payload_type(0),
use_external_encoder(false),
- width(0),
- height(0),
congestion_control_back_off(kDefaultCongestionControlBackOff),
max_bitrate(5000000),
min_bitrate(1000000),
@@ -46,8 +43,7 @@ VideoSenderConfig::~VideoSenderConfig() {}
AudioSenderConfig::AudioSenderConfig()
: ssrc(0),
- incoming_feedback_ssrc(0),
- rtcp_interval(kDefaultRtcpIntervalMs),
+ receiver_ssrc(0),
max_playout_delay(
base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs)),
rtp_payload_type(0),
@@ -60,14 +56,13 @@ AudioSenderConfig::AudioSenderConfig()
AudioSenderConfig::~AudioSenderConfig() {}
FrameReceiverConfig::FrameReceiverConfig()
- : feedback_ssrc(0),
- incoming_ssrc(0),
- rtcp_interval(kDefaultRtcpIntervalMs),
+ : receiver_ssrc(0),
+ sender_ssrc(0),
rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
rtp_payload_type(0),
- frequency(0),
+ rtp_timebase(0),
channels(0),
- max_frame_rate(0),
+ target_frame_rate(0),
codec(CODEC_UNKNOWN) {}
FrameReceiverConfig::~FrameReceiverConfig() {}
diff --git a/chromium/media/cast/cast_config.h b/chromium/media/cast/cast_config.h
index 62ad9c23534..36ed104f93b 100644
--- a/chromium/media/cast/cast_config.h
+++ b/chromium/media/cast/cast_config.h
@@ -33,9 +33,7 @@ struct AudioSenderConfig {
uint32 ssrc;
// The receiver's SSRC identifier.
- uint32 incoming_feedback_ssrc;
-
- int rtcp_interval;
+ uint32 receiver_ssrc;
// The total amount of time between a frame's capture/recording on the sender
// and its playback on the receiver (i.e., shown to a user). This should be
@@ -70,9 +68,7 @@ struct VideoSenderConfig {
uint32 ssrc;
// The receiver's SSRC identifier.
- uint32 incoming_feedback_ssrc; // TODO(miu): Rename to receiver_ssrc.
-
- int rtcp_interval;
+ uint32 receiver_ssrc;
// The total amount of time between a frame's capture/recording on the sender
// and its playback on the receiver (i.e., shown to a user). This should be
@@ -87,8 +83,6 @@ struct VideoSenderConfig {
int rtp_payload_type;
bool use_external_encoder;
- int width; // Incoming frames will be scaled to this size.
- int height;
float congestion_control_back_off;
int max_bitrate;
@@ -97,7 +91,19 @@ struct VideoSenderConfig {
int max_qp;
int min_qp;
int max_frame_rate; // TODO(miu): Should be double, not int.
- int max_number_of_video_buffers_used; // Max value depend on codec.
+
+ // This field is used differently by various encoders. It defaults to 1.
+ //
+ // For VP8, it should be 1 to operate in single-buffer mode, or 3 to operate
+ // in multi-buffer mode. See
+ // http://www.webmproject.org/docs/encoder-parameters/ for details.
+ //
+ // For H.264 on Mac or iOS, it controls the max number of frames the encoder
+ // may hold before emitting a frame. A larger window may allow higher encoding
+ // efficiency at the cost of latency and memory. Set to 0 to let the encoder
+ // choose a suitable value for the platform and other encoding settings.
+ int max_number_of_video_buffers_used;
+
Codec codec;
int number_of_encode_threads;
@@ -114,14 +120,10 @@ struct FrameReceiverConfig {
~FrameReceiverConfig();
// The receiver's SSRC identifier.
- uint32 feedback_ssrc; // TODO(miu): Rename to receiver_ssrc for clarity.
+ uint32 receiver_ssrc;
// The sender's SSRC identifier.
- uint32 incoming_ssrc; // TODO(miu): Rename to sender_ssrc for clarity.
-
- // Mean interval (in milliseconds) between RTCP reports.
- // TODO(miu): Remove this since it's never not kDefaultRtcpIntervalMs.
- int rtcp_interval;
+ uint32 sender_ssrc;
// The total amount of time between a frame's capture/recording on the sender
// and its playback on the receiver (i.e., shown to a user). This is fixed as
@@ -136,7 +138,7 @@ struct FrameReceiverConfig {
// RTP timebase: The number of RTP units advanced per one second. For audio,
// this is the sampling rate. For video, by convention, this is 90 kHz.
- int frequency; // TODO(miu): Rename to rtp_timebase for clarity.
+ int rtp_timebase;
// Number of channels. For audio, this is normally 2. For video, this must
// be 1 as Cast does not have support for stereoscopic video.
@@ -145,7 +147,7 @@ struct FrameReceiverConfig {
// The target frame rate. For audio, this is normally 100 (i.e., frames have
// a duration of 10ms each). For video, this is normally 30, but any frame
// rate is supported.
- int max_frame_rate; // TODO(miu): Rename to target_frame_rate.
+ int target_frame_rate;
// Codec used for the compression of signal data.
// TODO(miu): Merge the AudioCodec and VideoCodec enums into one so this union
@@ -164,8 +166,11 @@ struct FrameReceiverConfig {
typedef Packet Packet;
typedef PacketList PacketList;
-typedef base::Callback<void(CastInitializationStatus)>
- CastInitializationCallback;
+// Callback that is run to update the client with current status. This is used
+// to allow the client to wait for asynchronous initialization to complete
+// before sending frames, and also to be notified of any runtime errors that
+// have halted the session.
+typedef base::Callback<void(OperationalStatus)> StatusChangeCallback;
typedef base::Callback<void(scoped_refptr<base::SingleThreadTaskRunner>,
scoped_ptr<media::VideoEncodeAccelerator>)>
diff --git a/chromium/media/cast/cast_defines.h b/chromium/media/cast/cast_defines.h
index 661c0952492..9bf2f44352c 100644
--- a/chromium/media/cast/cast_defines.h
+++ b/chromium/media/cast/cast_defines.h
@@ -36,24 +36,39 @@ const int kMaxUnackedFrames = 120;
const int64 kCastMessageUpdateIntervalMs = 33;
const int64 kNackRepeatIntervalMs = 30;
-enum CastInitializationStatus {
- STATUS_AUDIO_UNINITIALIZED,
- STATUS_VIDEO_UNINITIALIZED,
- STATUS_AUDIO_INITIALIZED,
- STATUS_VIDEO_INITIALIZED,
- STATUS_INVALID_CAST_ENVIRONMENT,
- STATUS_INVALID_CRYPTO_CONFIGURATION,
- STATUS_UNSUPPORTED_AUDIO_CODEC,
- STATUS_UNSUPPORTED_VIDEO_CODEC,
- STATUS_INVALID_AUDIO_CONFIGURATION,
- STATUS_INVALID_VIDEO_CONFIGURATION,
- STATUS_HW_VIDEO_ENCODER_NOT_SUPPORTED,
+// Success/in-progress/failure status codes bubbled up to clients via
+// StatusChangeCallbacks.
+enum OperationalStatus {
+ // Client should not send frames yet (sender), or should not expect to receive
+ // frames yet (receiver).
+ STATUS_UNINITIALIZED,
+
+ // Client may now send or receive frames.
+ STATUS_INITIALIZED,
+
+ // Codec is being re-initialized. Client may continue sending frames, but
+ // some may be ignored/dropped until a transition back to STATUS_INITIALIZED.
+ STATUS_CODEC_REINIT_PENDING,
+
+ // Session has halted due to invalid configuration.
+ STATUS_INVALID_CONFIGURATION,
+
+ // Session has halted due to an unsupported codec.
+ STATUS_UNSUPPORTED_CODEC,
+
+ // Session has halted due to a codec initialization failure. Note that this
+ // can be reported after STATUS_INITIALIZED/STATUS_CODEC_REINIT_PENDING if the
+ // codec was re-initialized during the session.
+ STATUS_CODEC_INIT_FAILED,
+
+ // Session has halted due to a codec runtime failure.
+ STATUS_CODEC_RUNTIME_ERROR,
};
enum DefaultSettings {
kDefaultAudioEncoderBitrate = 0, // This means "auto," and may mean VBR.
kDefaultAudioSamplingRate = 48000,
- kDefaultMaxQp = 56,
+ kDefaultMaxQp = 63,
kDefaultMinQp = 4,
kDefaultMaxFrameRate = 30,
kDefaultNumberOfVideoBuffers = 1,
diff --git a/chromium/media/cast/cast_environment.h b/chromium/media/cast/cast_environment.h
index d3c9474fa3f..83b5ce3e6ae 100644
--- a/chromium/media/cast/cast_environment.h
+++ b/chromium/media/cast/cast_environment.h
@@ -71,7 +71,7 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
protected:
virtual ~CastEnvironment();
- // Subclasses may override these.
+ // Subclasses may final these.
scoped_refptr<base::SingleThreadTaskRunner> main_thread_proxy_;
scoped_refptr<base::SingleThreadTaskRunner> audio_thread_proxy_;
scoped_refptr<base::SingleThreadTaskRunner> video_thread_proxy_;
diff --git a/chromium/media/cast/cast_receiver.h b/chromium/media/cast/cast_receiver.h
index f57942231d9..a2668c740e2 100644
--- a/chromium/media/cast/cast_receiver.h
+++ b/chromium/media/cast/cast_receiver.h
@@ -16,6 +16,7 @@
#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/net/cast_transport_sender.h"
namespace media {
class VideoFrame;
@@ -51,13 +52,11 @@ class CastReceiver {
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- PacketSender* const packet_sender);
+ CastTransportSender* const transport);
// All received RTP and RTCP packets for the call should be sent to this
// PacketReceiver. Can be called from any thread.
- // TODO(hubbe): Replace with:
- // virtual void ReceivePacket(scoped_ptr<Packet> packet) = 0;
- virtual PacketReceiverCallback packet_receiver() = 0;
+ virtual void ReceivePacket(scoped_ptr<Packet> packet) = 0;
// Polling interface to get audio and video frames from the CastReceiver. The
// the RequestDecodedXXXXXFrame() methods utilize internal software-based
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
index 7615c28602c..7c6891312b4 100644
--- a/chromium/media/cast/cast_sender.h
+++ b/chromium/media/cast/cast_sender.h
@@ -21,6 +21,10 @@
#include "media/cast/cast_environment.h"
#include "media/cast/net/cast_transport_sender.h"
+namespace gfx {
+class Size;
+}
+
namespace media {
class VideoFrame;
@@ -36,6 +40,23 @@ class VideoFrameInput : public base::RefCountedThreadSafe<VideoFrameInput> {
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time) = 0;
+ // Creates a |VideoFrame| optimized for the encoder. When available, these
+ // frames offer performance benefits, such as memory copy elimination. The
+ // format is guaranteed to be I420 or NV12.
+ //
+ // Not every encoder supports this method. Use |CanCreateOptimizedFrames| to
+ // determine if you can and should use this method.
+ //
+ // Even if |CanCreateOptimizedFrames| indicates support, there are transient
+ // conditions during a session where optimized frames cannot be provided. In
+ // this case, the caller must be able to account for a nullptr return value
+ // and instantiate its own media::VideoFrames.
+ virtual scoped_refptr<VideoFrame> MaybeCreateOptimizedFrame(
+ const gfx::Size& frame_size, base::TimeDelta timestamp) = 0;
+
+ // Returns true if the encoder supports creating optimized frames.
+ virtual bool CanCreateOptimizedFrames() const = 0;
+
protected:
virtual ~VideoFrameInput() {}
@@ -74,16 +95,18 @@ class CastSender {
virtual scoped_refptr<AudioFrameInput> audio_frame_input() = 0;
// Initialize the audio stack. Must be called in order to send audio frames.
- // Status of the initialization will be returned on cast_initialization_cb.
+ // |status_change_cb| will be run as operational status changes.
virtual void InitializeAudio(
const AudioSenderConfig& audio_config,
- const CastInitializationCallback& cast_initialization_cb) = 0;
+ const StatusChangeCallback& status_change_cb) = 0;
// Initialize the video stack. Must be called in order to send video frames.
- // Status of the initialization will be returned on cast_initialization_cb.
+ // |status_change_cb| will be run as operational status changes.
+ //
+ // TODO(miu): Remove the VEA-specific callbacks. http://crbug.com/454029
virtual void InitializeVideo(
const VideoSenderConfig& video_config,
- const CastInitializationCallback& cast_initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb) = 0;
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
index 571e92ef540..9c6266c32b6 100644
--- a/chromium/media/cast/cast_sender_impl.cc
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "media/base/video_frame.h"
+#include "media/cast/sender/video_frame_factory.h"
namespace media {
namespace cast {
@@ -19,10 +20,14 @@ class LocalVideoFrameInput : public VideoFrameInput {
public:
LocalVideoFrameInput(scoped_refptr<CastEnvironment> cast_environment,
base::WeakPtr<VideoSender> video_sender)
- : cast_environment_(cast_environment), video_sender_(video_sender) {}
+ : cast_environment_(cast_environment),
+ video_sender_(video_sender),
+ video_frame_factory_(
+ video_sender.get() ?
+ video_sender->CreateVideoFrameFactory().release() : nullptr) {}
void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time) override {
+ const base::TimeTicks& capture_time) final {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(&VideoSender::InsertRawVideoFrame,
@@ -31,14 +36,26 @@ class LocalVideoFrameInput : public VideoFrameInput {
capture_time));
}
+ scoped_refptr<VideoFrame> MaybeCreateOptimizedFrame(
+ const gfx::Size& frame_size,
+ base::TimeDelta timestamp) final {
+ return video_frame_factory_ ?
+ video_frame_factory_->MaybeCreateFrame(frame_size, timestamp) : nullptr;
+ }
+
+ bool CanCreateOptimizedFrames() const final {
+ return video_frame_factory_.get() != nullptr;
+ }
+
protected:
- ~LocalVideoFrameInput() override {}
+ ~LocalVideoFrameInput() final {}
private:
friend class base::RefCountedThreadSafe<LocalVideoFrameInput>;
- scoped_refptr<CastEnvironment> cast_environment_;
- base::WeakPtr<VideoSender> video_sender_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const base::WeakPtr<VideoSender> video_sender_;
+ const scoped_ptr<VideoFrameFactory> video_frame_factory_;
DISALLOW_COPY_AND_ASSIGN(LocalVideoFrameInput);
};
@@ -52,7 +69,7 @@ class LocalAudioFrameInput : public AudioFrameInput {
: cast_environment_(cast_environment), audio_sender_(audio_sender) {}
void InsertAudio(scoped_ptr<AudioBus> audio_bus,
- const base::TimeTicks& recorded_time) override {
+ const base::TimeTicks& recorded_time) final {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(&AudioSender::InsertAudio,
@@ -62,7 +79,7 @@ class LocalAudioFrameInput : public AudioFrameInput {
}
protected:
- ~LocalAudioFrameInput() override {}
+ ~LocalAudioFrameInput() final {}
private:
friend class base::RefCountedThreadSafe<LocalAudioFrameInput>;
@@ -92,7 +109,7 @@ CastSenderImpl::CastSenderImpl(
void CastSenderImpl::InitializeAudio(
const AudioSenderConfig& audio_config,
- const CastInitializationCallback& cast_initialization_cb) {
+ const StatusChangeCallback& status_change_cb) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
CHECK(audio_config.use_external_encoder ||
cast_environment_->HasAudioThread());
@@ -100,14 +117,12 @@ void CastSenderImpl::InitializeAudio(
VLOG(1) << "CastSenderImpl@" << this << "::InitializeAudio()";
audio_sender_.reset(
- new AudioSender(cast_environment_, audio_config, transport_sender_));
-
- const CastInitializationStatus status = audio_sender_->InitializationResult();
- if (status == STATUS_AUDIO_INITIALIZED) {
- audio_frame_input_ =
- new LocalAudioFrameInput(cast_environment_, audio_sender_->AsWeakPtr());
- }
- cast_initialization_cb.Run(status);
+ new AudioSender(cast_environment_,
+ audio_config,
+ base::Bind(&CastSenderImpl::OnAudioStatusChange,
+ weak_factory_.GetWeakPtr(),
+ status_change_cb),
+ transport_sender_));
if (video_sender_) {
DCHECK(audio_sender_->GetTargetPlayoutDelay() ==
video_sender_->GetTargetPlayoutDelay());
@@ -116,20 +131,19 @@ void CastSenderImpl::InitializeAudio(
void CastSenderImpl::InitializeVideo(
const VideoSenderConfig& video_config,
- const CastInitializationCallback& cast_initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- CHECK(video_config.use_external_encoder ||
- cast_environment_->HasVideoThread());
VLOG(1) << "CastSenderImpl@" << this << "::InitializeVideo()";
video_sender_.reset(new VideoSender(
cast_environment_,
video_config,
- base::Bind(&CastSenderImpl::OnVideoInitialized,
- weak_factory_.GetWeakPtr(), cast_initialization_cb),
+ base::Bind(&CastSenderImpl::OnVideoStatusChange,
+ weak_factory_.GetWeakPtr(),
+ status_change_cb),
create_vea_cb,
create_video_encode_mem_cb,
transport_sender_,
@@ -165,13 +179,26 @@ void CastSenderImpl::SetTargetPlayoutDelay(
}
}
-void CastSenderImpl::OnVideoInitialized(
- const CastInitializationCallback& initialization_cb,
- media::cast::CastInitializationStatus result) {
+void CastSenderImpl::OnAudioStatusChange(
+ const StatusChangeCallback& status_change_cb,
+ OperationalStatus status) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (status == STATUS_INITIALIZED && !audio_frame_input_) {
+ audio_frame_input_ =
+ new LocalAudioFrameInput(cast_environment_, audio_sender_->AsWeakPtr());
+ }
+ status_change_cb.Run(status);
+}
+
+void CastSenderImpl::OnVideoStatusChange(
+ const StatusChangeCallback& status_change_cb,
+ OperationalStatus status) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- video_frame_input_ =
- new LocalVideoFrameInput(cast_environment_, video_sender_->AsWeakPtr());
- initialization_cb.Run(result);
+ if (status == STATUS_INITIALIZED && !video_frame_input_) {
+ video_frame_input_ =
+ new LocalVideoFrameInput(cast_environment_, video_sender_->AsWeakPtr());
+ }
+ status_change_cb.Run(status);
}
} // namespace cast
diff --git a/chromium/media/cast/cast_sender_impl.h b/chromium/media/cast/cast_sender_impl.h
index b76603e498b..341bcbc0675 100644
--- a/chromium/media/cast/cast_sender_impl.h
+++ b/chromium/media/cast/cast_sender_impl.h
@@ -27,30 +27,29 @@ class CastSenderImpl : public CastSender {
CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
CastTransportSender* const transport_sender);
- void InitializeAudio(
- const AudioSenderConfig& audio_config,
- const CastInitializationCallback& cast_initialization_cb) override;
+ void InitializeAudio(const AudioSenderConfig& audio_config,
+ const StatusChangeCallback& status_change_cb) final;
void InitializeVideo(
const VideoSenderConfig& video_config,
- const CastInitializationCallback& cast_initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb)
- override;
+ final;
- void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) override;
+ void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) final;
- ~CastSenderImpl() override;
+ ~CastSenderImpl() final;
- scoped_refptr<AudioFrameInput> audio_frame_input() override;
- scoped_refptr<VideoFrameInput> video_frame_input() override;
+ scoped_refptr<AudioFrameInput> audio_frame_input() final;
+ scoped_refptr<VideoFrameInput> video_frame_input() final;
private:
void ReceivedPacket(scoped_ptr<Packet> packet);
- void OnVideoInitialized(
- const CastInitializationCallback& initialization_cb,
- media::cast::CastInitializationStatus result);
+ void OnAudioStatusChange(const StatusChangeCallback& status_change_cb,
+ OperationalStatus status);
+ void OnVideoStatusChange(const StatusChangeCallback& status_change_cb,
+ OperationalStatus status);
- CastInitializationCallback initialization_callback_;
scoped_ptr<AudioSender> audio_sender_;
scoped_ptr<VideoSender> video_sender_;
scoped_refptr<AudioFrameInput> audio_frame_input_;
diff --git a/chromium/media/cast/cast_testing.gypi b/chromium/media/cast/cast_testing.gypi
index a2af2572275..fbc4d47ff26 100644
--- a/chromium/media/cast/cast_testing.gypi
+++ b/chromium/media/cast/cast_testing.gypi
@@ -25,12 +25,12 @@
'test/fake_media_source.h',
'test/fake_single_thread_task_runner.cc',
'test/fake_single_thread_task_runner.h',
+ 'test/loopback_transport.cc',
+ 'test/loopback_transport.h',
'test/skewed_single_thread_task_runner.cc',
'test/skewed_single_thread_task_runner.h',
'test/skewed_tick_clock.cc',
'test/skewed_tick_clock.h',
- 'test/loopback_transport.cc',
- 'test/loopback_transport.h',
'test/utility/audio_utility.cc',
'test/utility/audio_utility.h',
'test/utility/barcode.cc',
@@ -45,10 +45,10 @@
'test/utility/net_utility.h',
'test/utility/standalone_cast_environment.cc',
'test/utility/standalone_cast_environment.h',
- 'test/utility/video_utility.cc',
- 'test/utility/video_utility.h',
'test/utility/udp_proxy.cc',
'test/utility/udp_proxy.h',
+ 'test/utility/video_utility.cc',
+ 'test/utility/video_utility.h',
], # source
},
{
@@ -75,21 +75,23 @@
'sources': [
'<(DEPTH)/media/base/run_all_unittests.cc',
'logging/encoding_event_subscriber_unittest.cc',
- 'logging/serialize_deserialize_test.cc',
'logging/logging_impl_unittest.cc',
'logging/logging_raw_unittest.cc',
'logging/receiver_time_offset_estimator_impl_unittest.cc',
+ 'logging/serialize_deserialize_test.cc',
'logging/simple_event_subscriber_unittest.cc',
'logging/stats_event_subscriber_unittest.cc',
'net/cast_transport_sender_impl_unittest.cc',
'net/frame_id_wrap_helper_test.cc',
+ 'net/mock_cast_transport_sender.cc',
+ 'net/mock_cast_transport_sender.h',
'net/pacing/mock_paced_packet_sender.cc',
'net/pacing/mock_paced_packet_sender.h',
'net/pacing/paced_sender_unittest.cc',
+ 'net/rtcp/receiver_rtcp_event_subscriber_unittest.cc',
'net/rtcp/rtcp_builder_unittest.cc',
'net/rtcp/rtcp_unittest.cc',
'net/rtcp/rtcp_utility_unittest.cc',
- 'net/rtcp/receiver_rtcp_event_subscriber_unittest.cc',
# TODO(miu): The following two are test utility modules. Rename/move the files.
'net/rtcp/test_rtcp_packet_builder.cc',
'net/rtcp/test_rtcp_packet_builder.h',
@@ -103,8 +105,8 @@
'net/rtp/rtp_header_parser.cc',
'net/rtp/rtp_header_parser.h',
'net/rtp/rtp_packet_builder.cc',
- 'net/rtp/rtp_parser_unittest.cc',
'net/rtp/rtp_packetizer_unittest.cc',
+ 'net/rtp/rtp_parser_unittest.cc',
'net/rtp/rtp_receiver_defines.h',
'net/udp_transport_unittest.cc',
'receiver/audio_decoder_unittest.cc',
@@ -113,16 +115,15 @@
'sender/audio_encoder_unittest.cc',
'sender/audio_sender_unittest.cc',
'sender/congestion_control_unittest.cc',
- 'sender/external_video_encoder_unittest.cc',
- 'sender/video_encoder_impl_unittest.cc',
+ 'sender/fake_video_encode_accelerator_factory.cc',
+ 'sender/fake_video_encode_accelerator_factory.h',
+ 'sender/video_encoder_unittest.cc',
'sender/video_sender_unittest.cc',
'test/end2end_unittest.cc',
'test/fake_receiver_time_offset_estimator.cc',
'test/fake_receiver_time_offset_estimator.h',
'test/fake_single_thread_task_runner.cc',
'test/fake_single_thread_task_runner.h',
- 'test/fake_video_encode_accelerator.cc',
- 'test/fake_video_encode_accelerator.h',
'test/utility/audio_utility_unittest.cc',
'test/utility/barcode_unittest.cc',
], # source
@@ -147,8 +148,6 @@
'test/cast_benchmarks.cc',
'test/fake_single_thread_task_runner.cc',
'test/fake_single_thread_task_runner.h',
- 'test/fake_video_encode_accelerator.cc',
- 'test/fake_video_encode_accelerator.h',
'test/utility/test_util.cc',
'test/utility/test_util.h',
], # source
@@ -340,6 +339,46 @@
}
]
}
- ]
- ], # targets
+ ],
+ ['OS=="ios" or OS=="mac"', {
+ 'targets': [
+ {
+ # GN version: //media/cast:cast_h264_vt_encoder_unittests
+ 'target_name': 'cast_h264_vt_encoder_unittests',
+ 'type': '<(gtest_target_type)',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_sender',
+ 'cast_test_utility',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'sender/h264_vt_encoder_unittest.cc',
+ ],
+ }], # targets
+ }], # OS=="ios" or OS=="mac"
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'cast_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cast_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'cast_unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ], # conditions
}
diff --git a/chromium/media/cast/cast_unittests.isolate b/chromium/media/cast/cast_unittests.isolate
new file mode 100644
index 00000000000..26b93953e73
--- /dev/null
+++ b/chromium/media/cast/cast_unittests.isolate
@@ -0,0 +1,84 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['use_x11==0', {
+ 'variables': {
+ 'command': [
+ '../../testing/test_env.py',
+ '<(PRODUCT_DIR)/cast_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ }],
+ ['use_x11==1', {
+ 'variables': {
+ 'command': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)',
+ '<(PRODUCT_DIR)/cast_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ 'files': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)/xdisplaycheck<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ['OS=="linux"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/libffmpegsumo.so',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../../testing/test_env.py',
+ '<(PRODUCT_DIR)/cast_unittests<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ['OS=="mac"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/ffmpegsumo.so',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/cast_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/ffmpegsumo.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/cast_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../../base/base.isolate',
+ ],
+}
diff --git a/chromium/media/cast/logging/encoding_event_subscriber.h b/chromium/media/cast/logging/encoding_event_subscriber.h
index a67e5bca2bd..f72fe33b534 100644
--- a/chromium/media/cast/logging/encoding_event_subscriber.h
+++ b/chromium/media/cast/logging/encoding_event_subscriber.h
@@ -49,11 +49,11 @@ class EncodingEventSubscriber : public RawEventSubscriber {
// timestamp).
EncodingEventSubscriber(EventMediaType event_media_type, size_t max_frames);
- ~EncodingEventSubscriber() override;
+ ~EncodingEventSubscriber() final;
// RawReventSubscriber implementations.
- void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
- void OnReceivePacketEvent(const PacketEvent& packet_event) override;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) final;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) final;
// Assigns frame events and packet events received so far to |frame_events|
// and |packet_events| and resets the internal state.
diff --git a/chromium/media/cast/logging/log_deserializer.cc b/chromium/media/cast/logging/log_deserializer.cc
index 1c6dd572240..5e5b189115c 100644
--- a/chromium/media/cast/logging/log_deserializer.cc
+++ b/chromium/media/cast/logging/log_deserializer.cc
@@ -4,7 +4,6 @@
#include "media/cast/logging/log_deserializer.h"
-#include <map>
#include <utility>
#include "base/big_endian.h"
diff --git a/chromium/media/cast/logging/logging_impl.cc b/chromium/media/cast/logging/logging_impl.cc
index 1143d1be217..b5351e3c6e7 100644
--- a/chromium/media/cast/logging/logging_impl.cc
+++ b/chromium/media/cast/logging/logging_impl.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "base/big_endian.h"
-#include "base/debug/trace_event.h"
+#include "base/trace_event/trace_event.h"
#include "media/cast/logging/logging_impl.h"
namespace media {
diff --git a/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h
index bc5348b90e4..c2b6455ec10 100644
--- a/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h
+++ b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h
@@ -36,15 +36,15 @@ class ReceiverTimeOffsetEstimatorImpl : public ReceiverTimeOffsetEstimator {
public:
ReceiverTimeOffsetEstimatorImpl();
- ~ReceiverTimeOffsetEstimatorImpl() override;
+ ~ReceiverTimeOffsetEstimatorImpl() final;
// RawEventSubscriber implementations.
- void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
- void OnReceivePacketEvent(const PacketEvent& packet_event) override;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) final;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) final;
// ReceiverTimeOffsetEstimator implementation.
bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
- base::TimeDelta* upper_bound) override;
+ base::TimeDelta* upper_bound) final;
private:
// This helper uses the difference between sent and recived event
diff --git a/chromium/media/cast/logging/simple_event_subscriber.cc b/chromium/media/cast/logging/simple_event_subscriber.cc
index 984d8f7d830..cad9956a6e0 100644
--- a/chromium/media/cast/logging/simple_event_subscriber.cc
+++ b/chromium/media/cast/logging/simple_event_subscriber.cc
@@ -4,7 +4,6 @@
#include "media/cast/logging/simple_event_subscriber.h"
-#include <vector>
#include "base/logging.h"
diff --git a/chromium/media/cast/logging/simple_event_subscriber.h b/chromium/media/cast/logging/simple_event_subscriber.h
index 176ab8c0d7b..1bdce4539f9 100644
--- a/chromium/media/cast/logging/simple_event_subscriber.h
+++ b/chromium/media/cast/logging/simple_event_subscriber.h
@@ -22,11 +22,11 @@ class SimpleEventSubscriber : public RawEventSubscriber {
public:
SimpleEventSubscriber();
- ~SimpleEventSubscriber() override;
+ ~SimpleEventSubscriber() final;
// RawEventSubscriber implementations.
- void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
- void OnReceivePacketEvent(const PacketEvent& packet_event) override;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) final;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) final;
// Assigns frame events received so far to |frame_events| and clears them
// from this object.
diff --git a/chromium/media/cast/logging/stats_event_subscriber.cc b/chromium/media/cast/logging/stats_event_subscriber.cc
index c42affa401e..9d68710bed0 100644
--- a/chromium/media/cast/logging/stats_event_subscriber.cc
+++ b/chromium/media/cast/logging/stats_event_subscriber.cc
@@ -154,11 +154,12 @@ void StatsEventSubscriber::OnReceiveFrameEvent(const FrameEvent& frame_event) {
} else if (type == FRAME_PLAYOUT) {
RecordE2ELatency(frame_event);
base::TimeDelta delay_delta = frame_event.delay_delta;
- histograms_[PLAYOUT_DELAY_MS_HISTO]->Add(delay_delta.InMillisecondsF());
// Positive delay_delta means the frame is late.
- if (delay_delta > base::TimeDelta())
+ if (delay_delta > base::TimeDelta()) {
num_frames_late_++;
+ histograms_[LATE_FRAME_MS_HISTO]->Add(delay_delta.InMillisecondsF());
+ }
}
if (is_receiver_event)
@@ -315,7 +316,7 @@ const char* StatsEventSubscriber::CastStatToString(CastStat stat) {
STAT_ENUM_TO_STRING(PACKET_LATENCY_MS_HISTO);
STAT_ENUM_TO_STRING(FRAME_LATENCY_MS_HISTO);
STAT_ENUM_TO_STRING(E2E_LATENCY_MS_HISTO);
- STAT_ENUM_TO_STRING(PLAYOUT_DELAY_MS_HISTO);
+ STAT_ENUM_TO_STRING(LATE_FRAME_MS_HISTO);
}
NOTREACHED();
return "";
@@ -348,9 +349,9 @@ void StatsEventSubscriber::InitHistograms() {
histograms_[FRAME_LATENCY_MS_HISTO].reset(
new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
kDefaultBucketWidthMs));
- histograms_[PLAYOUT_DELAY_MS_HISTO].reset(
- new SimpleHistogram(0, kSmallMaxLatencyBucketMs,
- kSmallBucketWidthMs));
+ histograms_[LATE_FRAME_MS_HISTO].reset(
+ new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
+ kDefaultBucketWidthMs));
histograms_[CAPTURE_LATENCY_MS_HISTO].reset(
new SimpleHistogram(0, kSmallMaxLatencyBucketMs,
kSmallBucketWidthMs));
@@ -576,9 +577,7 @@ void StatsEventSubscriber::RecordE2ELatency(const FrameEvent& frame_event) {
if (it == recent_frame_infos_.end())
return;
- // Playout time is event time + playout delay.
- base::TimeTicks playout_time =
- frame_event.timestamp + frame_event.delay_delta - receiver_offset;
+ base::TimeTicks playout_time = frame_event.timestamp - receiver_offset;
base::TimeDelta latency = playout_time - it->second.capture_time;
total_e2e_latency_ += latency;
e2e_latency_datapoints_++;
diff --git a/chromium/media/cast/logging/stats_event_subscriber.h b/chromium/media/cast/logging/stats_event_subscriber.h
index 3e8c83b153a..36d51107773 100644
--- a/chromium/media/cast/logging/stats_event_subscriber.h
+++ b/chromium/media/cast/logging/stats_event_subscriber.h
@@ -32,11 +32,11 @@ class StatsEventSubscriber : public RawEventSubscriber {
base::TickClock* clock,
ReceiverTimeOffsetEstimator* offset_estimator);
- ~StatsEventSubscriber() override;
+ ~StatsEventSubscriber() final;
// RawReventSubscriber implementations.
- void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
- void OnReceivePacketEvent(const PacketEvent& packet_event) override;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) final;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) final;
// Returns stats as a DictionaryValue. The dictionary contains one entry -
// "audio" or "video" pointing to an inner dictionary.
@@ -167,7 +167,7 @@ class StatsEventSubscriber : public RawEventSubscriber {
PACKET_LATENCY_MS_HISTO,
FRAME_LATENCY_MS_HISTO,
E2E_LATENCY_MS_HISTO,
- PLAYOUT_DELAY_MS_HISTO
+ LATE_FRAME_MS_HISTO
};
struct FrameInfo {
diff --git a/chromium/media/cast/logging/stats_event_subscriber_unittest.cc b/chromium/media/cast/logging/stats_event_subscriber_unittest.cc
index 570e2ccf3a5..4501454cd2a 100644
--- a/chromium/media/cast/logging/stats_event_subscriber_unittest.cc
+++ b/chromium/media/cast/logging/stats_event_subscriber_unittest.cc
@@ -289,7 +289,7 @@ TEST_F(StatsEventSubscriberTest, E2ELatency) {
int delay_micros = base::RandInt(-50000, 50000);
base::TimeDelta delay = base::TimeDelta::FromMilliseconds(delay_micros);
- total_latency += latency + delay;
+ total_latency += latency;
cast_environment_->Logging()->InsertFrameEventWithDelay(
receiver_clock_.NowTicks(),
@@ -585,6 +585,14 @@ TEST_F(StatsEventSubscriberTest, Histograms) {
123);
}
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ receiver_clock_.NowTicks(),
+ FRAME_PLAYOUT,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ base::TimeDelta::FromMilliseconds(100));
+
StatsEventSubscriber::SimpleHistogram* histogram;
scoped_ptr<base::ListValue> values;
@@ -621,6 +629,12 @@ TEST_F(StatsEventSubscriberTest, Histograms) {
ASSERT_TRUE(histogram);
values = histogram->GetHistogram().Pass();
EXPECT_TRUE(CheckHistogramHasValue(values.get(), "400-419", 3));
+
+ histogram = subscriber_->GetHistogramForTesting(
+ StatsEventSubscriber::LATE_FRAME_MS_HISTO);
+ ASSERT_TRUE(histogram);
+ values = histogram->GetHistogram().Pass();
+ EXPECT_TRUE(CheckHistogramHasValue(values.get(), "100-119", 1));
}
} // namespace cast
diff --git a/chromium/media/cast/net/cast_transport_config.h b/chromium/media/cast/net/cast_transport_config.h
index 0f101d896ed..f73d0a78cda 100644
--- a/chromium/media/cast/net/cast_transport_config.h
+++ b/chromium/media/cast/net/cast_transport_config.h
@@ -127,6 +127,8 @@ typedef scoped_refptr<base::RefCountedData<Packet> > PacketRef;
typedef std::vector<PacketRef> PacketList;
typedef base::Callback<void(scoped_ptr<Packet> packet)> PacketReceiverCallback;
+typedef base::Callback<bool(scoped_ptr<Packet> packet)>
+ PacketReceiverCallbackWithStatus;
class PacketSender {
public:
diff --git a/chromium/media/cast/net/cast_transport_sender.h b/chromium/media/cast/net/cast_transport_sender.h
index 46031a886e9..fc88a5c350b 100644
--- a/chromium/media/cast/net/cast_transport_sender.h
+++ b/chromium/media/cast/net/cast_transport_sender.h
@@ -40,6 +40,8 @@ class NetLog;
namespace media {
namespace cast {
+struct RtpReceiverStatistics;
+struct RtcpTimeData;
// Following the initialization of either audio or video an initialization
// status will be sent via this callback.
@@ -56,11 +58,13 @@ class CastTransportSender : public base::NonThreadSafe {
static scoped_ptr<CastTransportSender> Create(
net::NetLog* net_log,
base::TickClock* clock,
+ const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
scoped_ptr<base::DictionaryValue> options,
const CastTransportStatusCallback& status_callback,
const BulkRawEventsCallback& raw_events_callback,
base::TimeDelta raw_events_callback_interval,
+ const PacketReceiverCallback& packet_callback,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner);
virtual ~CastTransportSender() {}
@@ -100,6 +104,23 @@ class CastTransportSender : public base::NonThreadSafe {
// Returns a callback for receiving packets for testing purposes.
virtual PacketReceiverCallback PacketReceiverForTesting();
+
+ // The following functions are needed for receving.
+
+ // Add a valid SSRC. This is used to verify that incoming packets
+ // come from the right sender. Without valid SSRCs, the return address cannot
+ // be automatically established.
+ virtual void AddValidSsrc(uint32 ssrc) = 0;
+
+ // Send an RTCP message from receiver to sender.
+ virtual void SendRtcpFromRtpReceiver(
+ uint32 ssrc,
+ uint32 sender_ssrc,
+ const RtcpTimeData& time_data,
+ const RtcpCastMessage* cast_message,
+ base::TimeDelta target_delay,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics) = 0;
};
} // namespace cast
diff --git a/chromium/media/cast/net/cast_transport_sender_impl.cc b/chromium/media/cast/net/cast_transport_sender_impl.cc
index 390180f131e..4def3272c74 100644
--- a/chromium/media/cast/net/cast_transport_sender_impl.cc
+++ b/chromium/media/cast/net/cast_transport_sender_impl.cc
@@ -6,7 +6,6 @@
#include "base/single_thread_task_runner.h"
#include "base/values.h"
-#include "media/cast/net/cast_transport_config.h"
#include "media/cast/net/cast_transport_defines.h"
#include "media/cast/net/udp_transport.h"
#include "net/base/net_errors.h"
@@ -18,10 +17,13 @@ namespace cast {
namespace {
// See header file for what these mean.
+const char kOptionDscp[] = "DSCP";
+#if defined(OS_WIN)
+const char kOptionNonBlockingIO[] = "non_blocking_io";
+#endif
const char kOptionPacerTargetBurstSize[] = "pacer_target_burst_size";
const char kOptionPacerMaxBurstSize[] = "pacer_max_burst_size";
const char kOptionSendBufferMinSize[] = "send_buffer_min_size";
-const char kOptionDscp[] = "DSCP";
const char kOptionWifiDisableScan[] = "disable_wifi_scan";
const char kOptionWifiMediaStreamingMode[] = "media_streaming_mode";
@@ -52,21 +54,25 @@ int32 GetTransportSendBufferSize(const base::DictionaryValue& options) {
scoped_ptr<CastTransportSender> CastTransportSender::Create(
net::NetLog* net_log,
base::TickClock* clock,
+ const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
scoped_ptr<base::DictionaryValue> options,
const CastTransportStatusCallback& status_callback,
const BulkRawEventsCallback& raw_events_callback,
base::TimeDelta raw_events_callback_interval,
+ const PacketReceiverCallback& packet_callback,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner) {
return scoped_ptr<CastTransportSender>(
new CastTransportSenderImpl(net_log,
clock,
+ local_end_point,
remote_end_point,
options.Pass(),
status_callback,
raw_events_callback,
raw_events_callback_interval,
transport_task_runner.get(),
+ packet_callback,
NULL));
}
@@ -77,12 +83,14 @@ PacketReceiverCallback CastTransportSender::PacketReceiverForTesting() {
CastTransportSenderImpl::CastTransportSenderImpl(
net::NetLog* net_log,
base::TickClock* clock,
+ const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
scoped_ptr<base::DictionaryValue> options,
const CastTransportStatusCallback& status_callback,
const BulkRawEventsCallback& raw_events_callback,
base::TimeDelta raw_events_callback_interval,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ const PacketReceiverCallback& packet_callback,
PacketSender* external_transport)
: clock_(clock),
status_callback_(status_callback),
@@ -92,7 +100,7 @@ CastTransportSenderImpl::CastTransportSenderImpl(
NULL :
new UdpTransport(net_log,
transport_task_runner,
- net::IPEndPoint(),
+ local_end_point,
remote_end_point,
GetTransportSendBufferSize(*options),
status_callback)),
@@ -109,6 +117,7 @@ CastTransportSenderImpl::CastTransportSenderImpl(
raw_events_callback_(raw_events_callback),
raw_events_callback_interval_(raw_events_callback_interval),
last_byte_acked_for_audio_(0),
+ packet_callback_(packet_callback),
weak_factory_(this) {
DCHECK(clock_);
if (!raw_events_callback_.is_null()) {
@@ -127,9 +136,14 @@ CastTransportSenderImpl::CastTransportSenderImpl(
// priority over other traffic.
transport_->SetDscp(net::DSCP_AF41);
}
+#if defined(OS_WIN)
+ if (options->HasKey(kOptionNonBlockingIO)) {
+ transport_->UseNonBlockingIO();
+ }
+#endif
transport_->StartReceiving(
base::Bind(&CastTransportSenderImpl::OnReceivedPacket,
- weak_factory_.GetWeakPtr()));
+ base::Unretained(this)));
int wifi_options = 0;
if (options->HasKey(kOptionWifiDisableScan)) {
wifi_options |= net::WIFI_OPTIONS_DISABLE_SCAN;
@@ -144,6 +158,9 @@ CastTransportSenderImpl::CastTransportSenderImpl(
}
CastTransportSenderImpl::~CastTransportSenderImpl() {
+ if (transport_) {
+ transport_->StopReceiving();
+ }
if (event_subscriber_.get())
logging_.RemoveRawEventSubscriber(event_subscriber_.get());
}
@@ -159,7 +176,7 @@ void CastTransportSenderImpl::InitializeAudio(
return;
}
- audio_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
+ audio_sender_.reset(new RtpSender(transport_task_runner_, &pacer_));
if (audio_sender_->Initialize(config)) {
// Audio packets have a higher priority.
pacer_.RegisterAudioSsrc(config.ssrc);
@@ -183,6 +200,7 @@ void CastTransportSenderImpl::InitializeAudio(
config.ssrc,
config.feedback_ssrc));
pacer_.RegisterAudioSsrc(config.ssrc);
+ AddValidSsrc(config.feedback_ssrc);
status_callback_.Run(TRANSPORT_AUDIO_INITIALIZED);
}
@@ -197,7 +215,7 @@ void CastTransportSenderImpl::InitializeVideo(
return;
}
- video_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
+ video_sender_.reset(new RtpSender(transport_task_runner_, &pacer_));
if (!video_sender_->Initialize(config)) {
video_sender_.reset();
status_callback_.Run(TRANSPORT_VIDEO_UNINITIALIZED);
@@ -216,6 +234,7 @@ void CastTransportSenderImpl::InitializeVideo(
config.ssrc,
config.feedback_ssrc));
pacer_.RegisterVideoSsrc(config.ssrc);
+ AddValidSsrc(config.feedback_ssrc);
status_callback_.Run(TRANSPORT_VIDEO_INITIALIZED);
}
@@ -314,8 +333,9 @@ void CastTransportSenderImpl::ResendPackets(
}
PacketReceiverCallback CastTransportSenderImpl::PacketReceiverForTesting() {
- return base::Bind(&CastTransportSenderImpl::OnReceivedPacket,
- weak_factory_.GetWeakPtr());
+ return base::Bind(
+ base::IgnoreResult(&CastTransportSenderImpl::OnReceivedPacket),
+ weak_factory_.GetWeakPtr());
}
void CastTransportSenderImpl::SendRawEvents() {
@@ -334,18 +354,35 @@ void CastTransportSenderImpl::SendRawEvents() {
raw_events_callback_interval_);
}
-void CastTransportSenderImpl::OnReceivedPacket(scoped_ptr<Packet> packet) {
+bool CastTransportSenderImpl::OnReceivedPacket(scoped_ptr<Packet> packet) {
+ const uint8_t* const data = &packet->front();
+ const size_t length = packet->size();
+ uint32 ssrc;
+ if (Rtcp::IsRtcpPacket(data, length)) {
+ ssrc = Rtcp::GetSsrcOfSender(data, length);
+ } else if (!RtpParser::ParseSsrc(data, length, &ssrc)) {
+ VLOG(1) << "Invalid RTP packet.";
+ return false;
+ }
+ if (valid_ssrcs_.find(ssrc) == valid_ssrcs_.end()) {
+ VLOG(1) << "Stale packet received.";
+ return false;
+ }
+
if (audio_rtcp_session_ &&
- audio_rtcp_session_->IncomingRtcpPacket(&packet->front(),
- packet->size())) {
- return;
+ audio_rtcp_session_->IncomingRtcpPacket(data, length)) {
+ return true;
}
if (video_rtcp_session_ &&
- video_rtcp_session_->IncomingRtcpPacket(&packet->front(),
- packet->size())) {
- return;
+ video_rtcp_session_->IncomingRtcpPacket(data, length)) {
+ return true;
}
- VLOG(1) << "Stale packet received.";
+ if (packet_callback_.is_null()) {
+ VLOG(1) << "Stale packet received.";
+ return false;
+ }
+ packet_callback_.Run(packet.Pass());
+ return true;
}
void CastTransportSenderImpl::OnReceivedLogMessage(
@@ -422,5 +459,31 @@ void CastTransportSenderImpl::OnReceivedCastMessage(
dedup_info);
}
+void CastTransportSenderImpl::AddValidSsrc(uint32 ssrc) {
+ valid_ssrcs_.insert(ssrc);
+}
+
+void CastTransportSenderImpl::SendRtcpFromRtpReceiver(
+ uint32 ssrc,
+ uint32 sender_ssrc,
+ const RtcpTimeData& time_data,
+ const RtcpCastMessage* cast_message,
+ base::TimeDelta target_delay,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics) {
+ const Rtcp rtcp(RtcpCastMessageCallback(),
+ RtcpRttCallback(),
+ RtcpLogMessageCallback(),
+ clock_,
+ &pacer_,
+ ssrc,
+ sender_ssrc);
+ rtcp.SendRtcpFromRtpReceiver(time_data,
+ cast_message,
+ target_delay,
+ rtcp_events,
+ rtp_receiver_statistics);
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/net/cast_transport_sender_impl.h b/chromium/media/cast/net/cast_transport_sender_impl.h
index b241ec8aae6..51051ad9084 100644
--- a/chromium/media/cast/net/cast_transport_sender_impl.h
+++ b/chromium/media/cast/net/cast_transport_sender_impl.h
@@ -24,6 +24,8 @@
#ifndef MEDIA_CAST_NET_CAST_TRANSPORT_SENDER_IMPL_H_
#define MEDIA_CAST_NET_CAST_TRANSPORT_SENDER_IMPL_H_
+#include <set>
+
#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
@@ -38,6 +40,7 @@
#include "media/cast/net/cast_transport_sender.h"
#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/rtp_parser.h"
#include "media/cast/net/rtp/rtp_sender.h"
namespace media {
@@ -56,6 +59,8 @@ class CastTransportSenderImpl : public CastTransportSender {
// |options| contains optional settings for the transport, possible
// keys are:
// "DSCP" (value ignored) - turns DSCP on
+ // "non_blocking_io" (value ignored) - Windows only.
+ // Turns on non-blocking IO for socket.
// "pacer_target_burst_size": int - specifies how many packets to send
// per 10 ms ideally.
// "pacer_max_burst_size": int - specifies how many pakcets to send
@@ -65,37 +70,53 @@ class CastTransportSenderImpl : public CastTransportSender {
// "disable_wifi_scan" (value ignored) - disable wifi scans while streaming
// "media_streaming_mode" (value ignored) - turn media streaming mode on
// Note, these options may be ignored on some platforms.
+ // TODO(hubbe): Too many callbacks, replace with an interface.
CastTransportSenderImpl(
net::NetLog* net_log,
base::TickClock* clock,
+ const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
scoped_ptr<base::DictionaryValue> options,
const CastTransportStatusCallback& status_callback,
const BulkRawEventsCallback& raw_events_callback,
base::TimeDelta raw_events_callback_interval,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ const PacketReceiverCallback& packet_callback,
PacketSender* external_transport);
- ~CastTransportSenderImpl() override;
+ ~CastTransportSenderImpl() final;
+ // CastTransportSender implementation.
void InitializeAudio(const CastTransportRtpConfig& config,
const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) override;
+ const RtcpRttCallback& rtt_cb) final;
void InitializeVideo(const CastTransportRtpConfig& config,
const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) override;
- void InsertFrame(uint32 ssrc, const EncodedFrame& frame) override;
+ const RtcpRttCallback& rtt_cb) final;
+ void InsertFrame(uint32 ssrc, const EncodedFrame& frame) final;
void SendSenderReport(uint32 ssrc,
base::TimeTicks current_time,
- uint32 current_time_as_rtp_timestamp) override;
+ uint32 current_time_as_rtp_timestamp) final;
void CancelSendingFrames(uint32 ssrc,
- const std::vector<uint32>& frame_ids) override;
+ const std::vector<uint32>& frame_ids) final;
+
+ void ResendFrameForKickstart(uint32 ssrc, uint32 frame_id) final;
+
+ PacketReceiverCallback PacketReceiverForTesting() final;
- void ResendFrameForKickstart(uint32 ssrc, uint32 frame_id) override;
+ // CastTransportReceiver implementation.
+ void AddValidSsrc(uint32 ssrc) final;
- PacketReceiverCallback PacketReceiverForTesting() override;
+ void SendRtcpFromRtpReceiver(
+ uint32 ssrc,
+ uint32 sender_ssrc,
+ const RtcpTimeData& time_data,
+ const RtcpCastMessage* cast_message,
+ base::TimeDelta target_delay,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics) final;
private:
FRIEND_TEST_ALL_PREFIXES(CastTransportSenderImplTest, NacksCancelRetransmits);
@@ -118,7 +139,7 @@ class CastTransportSenderImpl : public CastTransportSender {
void SendRawEvents();
// Called when a packet is received.
- void OnReceivedPacket(scoped_ptr<Packet> packet);
+ bool OnReceivedPacket(scoped_ptr<Packet> packet);
// Called when a log message is received.
void OnReceivedLogMessage(EventMediaType media_type,
@@ -167,6 +188,13 @@ class CastTransportSenderImpl : public CastTransportSender {
// audio packet.
int64 last_byte_acked_for_audio_;
+ // Packets that don't match these ssrcs are ignored.
+ std::set<uint32> valid_ssrcs_;
+
+ // Called with incoming packets. (Unless they match the
+ // channels created by Initialize{Audio,Video}.
+ PacketReceiverCallback packet_callback_;
+
scoped_ptr<net::ScopedWifiOptions> wifi_options_autoreset_;
base::WeakPtrFactory<CastTransportSenderImpl> weak_factory_;
diff --git a/chromium/media/cast/net/cast_transport_sender_impl_unittest.cc b/chromium/media/cast/net/cast_transport_sender_impl_unittest.cc
index 7b951578e29..921bddf5499 100644
--- a/chromium/media/cast/net/cast_transport_sender_impl_unittest.cc
+++ b/chromium/media/cast/net/cast_transport_sender_impl_unittest.cc
@@ -31,7 +31,7 @@ class FakePacketSender : public PacketSender {
FakePacketSender()
: paused_(false), packets_sent_(0), bytes_sent_(0) {}
- bool SendPacket(PacketRef packet, const base::Closure& cb) override {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final {
if (paused_) {
stored_packet_ = packet;
callback_ = cb;
@@ -42,7 +42,7 @@ class FakePacketSender : public PacketSender {
return true;
}
- int64 GetBytesSent() override { return bytes_sent_; }
+ int64 GetBytesSent() final { return bytes_sent_; }
void SetPaused(bool paused) {
paused_ = paused;
@@ -80,11 +80,13 @@ class CastTransportSenderImplTest : public ::testing::Test {
new CastTransportSenderImpl(NULL,
&testing_clock_,
net::IPEndPoint(),
+ net::IPEndPoint(),
make_scoped_ptr(new base::DictionaryValue),
base::Bind(&UpdateCastTransportStatus),
BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
+ PacketReceiverCallback(),
&transport_));
task_runner_->RunTasks();
}
@@ -101,11 +103,13 @@ class CastTransportSenderImplTest : public ::testing::Test {
new CastTransportSenderImpl(NULL,
&testing_clock_,
net::IPEndPoint(),
+ net::IPEndPoint(),
options.Pass(),
base::Bind(&UpdateCastTransportStatus),
BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
+ PacketReceiverCallback(),
&transport_));
task_runner_->RunTasks();
}
@@ -115,12 +119,14 @@ class CastTransportSenderImplTest : public ::testing::Test {
NULL,
&testing_clock_,
net::IPEndPoint(),
+ net::IPEndPoint(),
make_scoped_ptr(new base::DictionaryValue),
base::Bind(&UpdateCastTransportStatus),
base::Bind(&CastTransportSenderImplTest::LogRawEvents,
base::Unretained(this)),
base::TimeDelta::FromMilliseconds(10),
task_runner_,
+ PacketReceiverCallback(),
&transport_));
task_runner_->RunTasks();
}
diff --git a/chromium/media/cast/net/mock_cast_transport_sender.cc b/chromium/media/cast/net/mock_cast_transport_sender.cc
new file mode 100644
index 00000000000..752698bb23c
--- /dev/null
+++ b/chromium/media/cast/net/mock_cast_transport_sender.cc
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/net/mock_cast_transport_sender.h"
+
+namespace media {
+namespace cast {
+
+MockCastTransportSender::MockCastTransportSender() {}
+
+MockCastTransportSender::~MockCastTransportSender() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/mock_cast_transport_sender.h b/chromium/media/cast/net/mock_cast_transport_sender.h
new file mode 100644
index 00000000000..0827e1f9c8c
--- /dev/null
+++ b/chromium/media/cast/net/mock_cast_transport_sender.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_NET_MOCK_CAST_TRANSPORT_SENDER_H_
+#define MEDIA_CAST_NET_MOCK_CAST_TRANSPORT_SENDER_H_
+
+#include "media/cast/net/cast_transport_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockCastTransportSender : public CastTransportSender {
+ public:
+ MockCastTransportSender();
+ virtual ~MockCastTransportSender();
+
+ MOCK_METHOD3(InitializeAudio, void(
+ const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb));
+ MOCK_METHOD3(InitializeVideo, void(
+ const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb));
+ MOCK_METHOD2(InsertFrame, void(uint32 ssrc, const EncodedFrame& frame));
+ MOCK_METHOD3(SendSenderReport, void(
+ uint32 ssrc,
+ base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp));
+ MOCK_METHOD2(CancelSendingFrames, void(
+ uint32 ssrc,
+ const std::vector<uint32>& frame_ids));
+ MOCK_METHOD2(ResendFrameForKickstart, void(uint32 ssrc, uint32 frame_id));
+ MOCK_METHOD0(PacketReceiverForTesting, PacketReceiverCallback());
+ MOCK_METHOD1(AddValidSsrc, void(uint32 ssrc));
+ MOCK_METHOD7(SendRtcpFromRtpReceiver, void(
+ uint32 ssrc,
+ uint32 sender_ssrc,
+ const RtcpTimeData& time_data,
+ const RtcpCastMessage* cast_message,
+ base::TimeDelta target_delay,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_NET_MOCK_CAST_TRANSPORT_SENDER_H_
diff --git a/chromium/media/cast/net/pacing/paced_sender.h b/chromium/media/cast/net/pacing/paced_sender.h
index 71349c00b49..122cf55f815 100644
--- a/chromium/media/cast/net/pacing/paced_sender.h
+++ b/chromium/media/cast/net/pacing/paced_sender.h
@@ -89,7 +89,7 @@ class PacedSender : public PacedPacketSender,
PacketSender* external_transport,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner);
- ~PacedSender() override;
+ ~PacedSender() final;
// These must be called before non-RTCP packets are sent.
void RegisterAudioSsrc(uint32 audio_ssrc);
@@ -111,11 +111,11 @@ class PacedSender : public PacedPacketSender,
int64 GetLastByteSentForSsrc(uint32 ssrc);
// PacedPacketSender implementation.
- bool SendPackets(const SendPacketVector& packets) override;
+ bool SendPackets(const SendPacketVector& packets) final;
bool ResendPackets(const SendPacketVector& packets,
- const DedupInfo& dedup_info) override;
- bool SendRtcpPacket(uint32 ssrc, PacketRef packet) override;
- void CancelSendingPacket(const PacketKey& packet_key) override;
+ const DedupInfo& dedup_info) final;
+ bool SendRtcpPacket(uint32 ssrc, PacketRef packet) final;
+ void CancelSendingPacket(const PacketKey& packet_key) final;
private:
// Actually sends the packets to the transport.
diff --git a/chromium/media/cast/net/pacing/paced_sender_unittest.cc b/chromium/media/cast/net/pacing/paced_sender_unittest.cc
index 62ad8ea0de7..2387d4278d7 100644
--- a/chromium/media/cast/net/pacing/paced_sender_unittest.cc
+++ b/chromium/media/cast/net/pacing/paced_sender_unittest.cc
@@ -32,7 +32,7 @@ class TestPacketSender : public PacketSender {
public:
TestPacketSender() : bytes_sent_(0) {}
- bool SendPacket(PacketRef packet, const base::Closure& cb) override {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final {
EXPECT_FALSE(expected_packet_size_.empty());
size_t expected_packet_size = expected_packet_size_.front();
expected_packet_size_.pop_front();
@@ -41,7 +41,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- int64 GetBytesSent() override { return bytes_sent_; }
+ int64 GetBytesSent() final { return bytes_sent_; }
void AddExpectedSize(int expected_packet_size, int repeat_count) {
for (int i = 0; i < repeat_count; ++i) {
diff --git a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc
index a751ff94d37..c96b71f3b54 100644
--- a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc
+++ b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc
@@ -11,9 +11,15 @@ namespace cast {
ReceiverRtcpEventSubscriber::ReceiverRtcpEventSubscriber(
const size_t max_size_to_retain, EventMediaType type)
- : max_size_to_retain_(max_size_to_retain), type_(type) {
+ : max_size_to_retain_(
+ max_size_to_retain * (kResendDelay * kNumResends + 1)),
+ type_(type),
+ popped_events_(0) {
DCHECK(max_size_to_retain_ > 0u);
DCHECK(type_ == AUDIO_EVENT || type_ == VIDEO_EVENT);
+ for (size_t i = 0; i < kNumResends; i++) {
+ send_ptrs_[i] = 0;
+ }
}
ReceiverRtcpEventSubscriber::~ReceiverRtcpEventSubscriber() {
@@ -33,7 +39,7 @@ void ReceiverRtcpEventSubscriber::OnReceiveFrameEvent(
case FRAME_DECODED:
rtcp_event.type = frame_event.type;
rtcp_event.timestamp = frame_event.timestamp;
- rtcp_events_.insert(
+ rtcp_events_.push_back(
std::make_pair(frame_event.rtp_timestamp, rtcp_event));
break;
default:
@@ -42,8 +48,6 @@ void ReceiverRtcpEventSubscriber::OnReceiveFrameEvent(
}
TruncateMapIfNeeded();
-
- DCHECK(rtcp_events_.size() <= max_size_to_retain_);
}
void ReceiverRtcpEventSubscriber::OnReceivePacketEvent(
@@ -56,22 +60,58 @@ void ReceiverRtcpEventSubscriber::OnReceivePacketEvent(
rtcp_event.type = packet_event.type;
rtcp_event.timestamp = packet_event.timestamp;
rtcp_event.packet_id = packet_event.packet_id;
- rtcp_events_.insert(
+ rtcp_events_.push_back(
std::make_pair(packet_event.rtp_timestamp, rtcp_event));
}
}
TruncateMapIfNeeded();
-
- DCHECK(rtcp_events_.size() <= max_size_to_retain_);
}
-void ReceiverRtcpEventSubscriber::GetRtcpEventsAndReset(
- RtcpEventMultiMap* rtcp_events) {
+struct CompareByFirst {
+ bool operator()(const std::pair<RtpTimestamp, RtcpEvent>& a,
+ const std::pair<RtpTimestamp, RtcpEvent>& b) {
+ return a.first < b.first;
+ }
+};
+
+void ReceiverRtcpEventSubscriber::GetRtcpEventsWithRedundancy(
+ RtcpEvents* rtcp_events) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(rtcp_events);
- rtcp_events->swap(rtcp_events_);
- rtcp_events_.clear();
+
+ uint64 event_level = rtcp_events_.size() + popped_events_;
+ event_levels_for_past_frames_.push_back(event_level);
+
+ for (size_t i = 0; i < kNumResends; i++) {
+ size_t resend_delay = kResendDelay * i;
+ if (event_levels_for_past_frames_.size() < resend_delay + 1)
+ break;
+
+ uint64 send_limit = event_levels_for_past_frames_[
+ event_levels_for_past_frames_.size() - 1 - resend_delay];
+
+ if (send_ptrs_[i] < popped_events_) {
+ send_ptrs_[i] = popped_events_;
+ }
+
+ while (send_ptrs_[i] < send_limit &&
+ rtcp_events->size() < kMaxEventsPerRTCP) {
+ rtcp_events->push_back(rtcp_events_[send_ptrs_[i] - popped_events_]);
+ send_ptrs_[i]++;
+ }
+ send_limit = send_ptrs_[i];
+ }
+
+ if (event_levels_for_past_frames_.size() > kResendDelay * (kNumResends + 1)) {
+ while (popped_events_ < event_levels_for_past_frames_[0]) {
+ rtcp_events_.pop_front();
+ popped_events_++;
+ }
+ event_levels_for_past_frames_.pop_front();
+ }
+
+ std::sort(rtcp_events->begin(), rtcp_events->end(), CompareByFirst());
}
void ReceiverRtcpEventSubscriber::TruncateMapIfNeeded() {
@@ -81,8 +121,11 @@ void ReceiverRtcpEventSubscriber::TruncateMapIfNeeded() {
DVLOG(3) << "RTCP event map exceeded size limit; "
<< "removing oldest entry";
// This is fine since we only insert elements one at a time.
- rtcp_events_.erase(rtcp_events_.begin());
+ rtcp_events_.pop_front();
+ popped_events_++;
}
+
+ DCHECK(rtcp_events_.size() <= max_size_to_retain_);
}
bool ReceiverRtcpEventSubscriber::ShouldProcessEvent(
diff --git a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
index c08733ca94d..248f07a74ea 100644
--- a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
+++ b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
@@ -5,7 +5,8 @@
#ifndef MEDIA_CAST_RTCP_RECEIVER_RTCP_EVENT_SUBSCRIBER_H_
#define MEDIA_CAST_RTCP_RECEIVER_RTCP_EVENT_SUBSCRIBER_H_
-#include <map>
+#include <deque>
+#include <vector>
#include "base/threading/thread_checker.h"
#include "media/cast/logging/logging_defines.h"
@@ -15,6 +16,10 @@
namespace media {
namespace cast {
+static const size_t kNumResends = 3;
+static const size_t kResendDelay = 10;
+static const size_t kMaxEventsPerRTCP = 20;
+
// A RawEventSubscriber implementation with the following properties:
// - Only processes raw event types that are relevant for sending from cast
// receiver to cast sender via RTCP.
@@ -26,7 +31,8 @@ namespace cast {
// timestamp) up to the size limit.
class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
public:
- typedef std::multimap<RtpTimestamp, RtcpEvent> RtcpEventMultiMap;
+ typedef std::pair<RtpTimestamp, RtcpEvent> RtcpEventPair;
+ typedef std::vector<std::pair<RtpTimestamp, RtcpEvent> > RtcpEvents;
// |max_size_to_retain|: The object will keep up to |max_size_to_retain|
// events
@@ -37,15 +43,15 @@ class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
ReceiverRtcpEventSubscriber(const size_t max_size_to_retain,
EventMediaType type);
- ~ReceiverRtcpEventSubscriber() override;
+ ~ReceiverRtcpEventSubscriber() final;
// RawEventSubscriber implementation.
- void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
- void OnReceivePacketEvent(const PacketEvent& packet_event) override;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) final;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) final;
- // Assigns events collected to |rtcp_events| and clears them from this
- // object.
- void GetRtcpEventsAndReset(RtcpEventMultiMap* rtcp_events);
+ // Assigns events collected to |rtcp_events|. If there is space, some
+ // older events will be added for redundancy as well.
+ void GetRtcpEventsWithRedundancy(RtcpEvents* rtcp_events);
private:
// If |rtcp_events_.size()| exceeds |max_size_to_retain_|, remove an oldest
@@ -65,7 +71,24 @@ class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
// to differentiate between video and audio frames, but since the
// implementation doesn't mix audio and video frame events, RTP timestamp
// only as key is fine.
- RtcpEventMultiMap rtcp_events_;
+ std::deque<RtcpEventPair> rtcp_events_;
+
+ // Counts how many events have been removed from rtcp_events_.
+ uint64 popped_events_;
+
+ // Events greater than send_ptrs_[0] have not been sent yet.
+ // Events greater than send_ptrs_[1] have been transmit once.
+ // Note that these counters use absolute numbers, so you need
+ // to subtract popped_events_ before looking up the events in
+ // rtcp_events_.
+ uint64 send_ptrs_[kNumResends];
+
+ // For each frame, we push how many events have been added to
+ // rtcp_events_ so far. We use this to make sure that
+ // send_ptrs_[N+1] is always at least kResendDelay frames behind
+ // send_ptrs_[N]. Old information is removed so that information
+ // for (kNumResends + 1) * kResendDelay frames remain.
+ std::deque<uint64> event_levels_for_past_frames_;
// Ensures methods are only called on the main thread.
base::ThreadChecker thread_checker_;
diff --git a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
index 35b72a78c26..bd31d2462f2 100644
--- a/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
+++ b/chromium/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
@@ -35,7 +35,7 @@ class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
~ReceiverRtcpEventSubscriberTest() override {}
- void TearDown() override {
+ void TearDown() final {
if (event_subscriber_) {
cast_environment_->Logging()->RemoveRawEventSubscriber(
event_subscriber_.get());
@@ -99,8 +99,8 @@ TEST_F(ReceiverRtcpEventSubscriberTest, LogVideoEvents) {
Init(VIDEO_EVENT);
InsertEvents();
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber_->GetRtcpEventsWithRedundancy(&rtcp_events);
EXPECT_EQ(3u, rtcp_events.size());
}
@@ -108,8 +108,8 @@ TEST_F(ReceiverRtcpEventSubscriberTest, LogAudioEvents) {
Init(AUDIO_EVENT);
InsertEvents();
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber_->GetRtcpEventsWithRedundancy(&rtcp_events);
EXPECT_EQ(3u, rtcp_events.size());
}
@@ -122,8 +122,8 @@ TEST_F(ReceiverRtcpEventSubscriberTest, DropEventsWhenSizeExceeded) {
/*rtp_timestamp*/ i * 10, /*frame_id*/ i);
}
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber_->GetRtcpEventsWithRedundancy(&rtcp_events);
EXPECT_EQ(10u, rtcp_events.size());
}
diff --git a/chromium/media/cast/net/rtcp/rtcp.cc b/chromium/media/cast/net/rtcp/rtcp.cc
index 77be988dac0..7b413d34c8d 100644
--- a/chromium/media/cast/net/rtcp/rtcp.cc
+++ b/chromium/media/cast/net/rtcp/rtcp.cc
@@ -53,7 +53,6 @@ std::pair<uint64, uint64> GetReceiverEventKey(
} // namespace
-
Rtcp::Rtcp(const RtcpCastMessageCallback& cast_callback,
const RtcpRttCallback& rtt_callback,
const RtcpLogMessageCallback& log_callback,
@@ -72,7 +71,9 @@ Rtcp::Rtcp(const RtcpCastMessageCallback& cast_callback,
last_report_truncated_ntp_(0),
local_clock_ahead_by_(ClockDriftSmoother::GetDefaultTimeConstant()),
lip_sync_rtp_timestamp_(0),
- lip_sync_ntp_timestamp_(0) {
+ lip_sync_ntp_timestamp_(0),
+ largest_seen_timestamp_(
+ base::TimeTicks::FromInternalValue(kint64min)) {
}
Rtcp::~Rtcp() {}
@@ -192,32 +193,41 @@ bool Rtcp::DedupeReceiverLog(RtcpReceiverLogMessage* receiver_log) {
return !receiver_log->empty();
}
+RtcpTimeData Rtcp::ConvertToNTPAndSave(base::TimeTicks now) {
+ RtcpTimeData ret;
+ ret.timestamp = now;
+
+ // Attach our NTP to all RTCP packets; with this information a "smart" sender
+ // can make decisions based on how old the RTCP message is.
+ ConvertTimeTicksToNtp(now, &ret.ntp_seconds, &ret.ntp_fraction);
+ SaveLastSentNtpTime(now, ret.ntp_seconds, ret.ntp_fraction);
+ return ret;
+}
+
void Rtcp::SendRtcpFromRtpReceiver(
+ RtcpTimeData time_data,
const RtcpCastMessage* cast_message,
base::TimeDelta target_delay,
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
- RtpReceiverStatistics* rtp_receiver_statistics) {
- base::TimeTicks now = clock_->NowTicks();
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics) const {
RtcpReportBlock report_block;
RtcpReceiverReferenceTimeReport rrtr;
-
- // Attach our NTP to all RTCP packets; with this information a "smart" sender
- // can make decisions based on how old the RTCP message is.
- ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
- SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
+ rrtr.ntp_seconds = time_data.ntp_seconds;
+ rrtr.ntp_fraction = time_data.ntp_fraction;
if (rtp_receiver_statistics) {
report_block.remote_ssrc = 0; // Not needed to set send side.
report_block.media_ssrc = remote_ssrc_; // SSRC of the RTP packet sender.
- rtp_receiver_statistics->GetStatistics(
- &report_block.fraction_lost, &report_block.cumulative_lost,
- &report_block.extended_high_sequence_number, &report_block.jitter);
-
+ report_block.fraction_lost = rtp_receiver_statistics->fraction_lost;
+ report_block.cumulative_lost = rtp_receiver_statistics->cumulative_lost;
+ report_block.extended_high_sequence_number =
+ rtp_receiver_statistics->extended_high_sequence_number;
+ report_block.jitter = rtp_receiver_statistics->jitter;
report_block.last_sr = last_report_truncated_ntp_;
if (!time_last_report_received_.is_null()) {
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
- base::TimeDelta delta = now - time_last_report_received_;
+ base::TimeDelta delta = time_data.timestamp - time_last_report_received_;
ConvertTimeToFractions(delta.InMicroseconds(), &delay_seconds,
&delay_fraction);
report_block.delay_since_last_sr =
@@ -226,9 +236,10 @@ void Rtcp::SendRtcpFromRtpReceiver(
report_block.delay_since_last_sr = 0;
}
}
+ RtcpBuilder rtcp_builder(local_ssrc_);
packet_sender_->SendRtcpPacket(
local_ssrc_,
- rtcp_builder_.BuildRtcpFromReceiver(
+ rtcp_builder.BuildRtcpFromReceiver(
rtp_receiver_statistics ? &report_block : NULL,
&rrtr,
cast_message,
@@ -350,8 +361,9 @@ void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
uint32 last_ntp_fraction) {
// Make sure |now| is always greater than the last element in
// |last_reports_sent_queue_|.
- if (!last_reports_sent_queue_.empty())
+ if (!last_reports_sent_queue_.empty()) {
DCHECK(now >= last_reports_sent_queue_.back().second);
+ }
uint32 last_report = ConvertToNtpDiff(last_ntp_seconds, last_ntp_fraction);
last_reports_sent_map_[last_report] = now;
diff --git a/chromium/media/cast/net/rtcp/rtcp.h b/chromium/media/cast/net/rtcp/rtcp.h
index 6b3208425c9..35608e07bcc 100644
--- a/chromium/media/cast/net/rtcp/rtcp.h
+++ b/chromium/media/cast/net/rtcp/rtcp.h
@@ -38,16 +38,6 @@ typedef std::pair<uint32, base::TimeTicks> RtcpSendTimePair;
typedef std::map<uint32, base::TimeTicks> RtcpSendTimeMap;
typedef std::queue<RtcpSendTimePair> RtcpSendTimeQueue;
-class RtpReceiverStatistics {
- public:
- virtual void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter) = 0;
-
- virtual ~RtpReceiverStatistics() {}
-};
-
// TODO(hclam): This should be renamed to RtcpSession.
class Rtcp {
public:
@@ -72,16 +62,25 @@ class Rtcp {
uint32 send_packet_count,
size_t send_octet_count);
- // |cast_message| and |rtcp_events| is optional; if |cast_message| is
- // provided the RTCP receiver report will append a Cast message containing
- // Acks and Nacks; |target_delay| is sent together with |cast_message|.
- // If |rtcp_events| is provided the RTCP receiver report will append the
- // log messages.
+ // This function is meant to be used in conjunction with
+ // SendRtcpFromRtpReceiver.
+ // |now| is converted to NTP and saved internally for
+ // future round-trip/lip-sync calculations.
+ // This is done in a separate method so that SendRtcpFromRtpReceiver can
+ // be done on a separate (temporary) RTCP object.
+ RtcpTimeData ConvertToNTPAndSave(base::TimeTicks now);
+
+ // |cast_message|, |rtcp_events| and |rtp_receiver_statistics| are optional;
+ // if |cast_message| is provided the RTCP receiver report will append a Cast
+ // message containing Acks and Nacks; |target_delay| is sent together with
+ // |cast_message|. If |rtcp_events| is provided the RTCP receiver report will
+ // append the log messages.
void SendRtcpFromRtpReceiver(
+ RtcpTimeData time_data,
const RtcpCastMessage* cast_message,
base::TimeDelta target_delay,
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
- RtpReceiverStatistics* rtp_receiver_statistics);
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
+ const RtpReceiverStatistics* rtp_receiver_statistics) const;
// Submit a received packet to this object. The packet will be parsed
// and used to maintain a RTCP session.
@@ -107,6 +106,9 @@ class Rtcp {
static bool IsRtcpPacket(const uint8* packet, size_t length);
static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
+ uint32 GetLocalSsrc() const { return local_ssrc_; }
+ uint32 GetRemoteSsrc() const { return remote_ssrc_; }
+
protected:
void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
diff --git a/chromium/media/cast/net/rtcp/rtcp_builder.cc b/chromium/media/cast/net/rtcp/rtcp_builder.cc
index b4e58c3d362..07006fcdb21 100644
--- a/chromium/media/cast/net/rtcp/rtcp_builder.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_builder.cc
@@ -10,8 +10,6 @@
#include <vector>
#include "base/logging.h"
-#include "media/cast/net/cast_transport_defines.h"
-#include "media/cast/net/rtcp/rtcp_defines.h"
#include "media/cast/net/rtcp/rtcp_utility.h"
namespace media {
@@ -44,33 +42,6 @@ bool EventTimestampLessThan(const RtcpReceiverEventLogMessage& lhs,
return lhs.event_timestamp < rhs.event_timestamp;
}
-void AddReceiverLogEntries(
- const RtcpReceiverLogMessage& redundancy_receiver_log_message,
- RtcpReceiverLogMessage* receiver_log_message,
- size_t* remaining_space,
- size_t* number_of_frames,
- size_t* total_number_of_messages_to_send) {
- RtcpReceiverLogMessage::const_iterator it =
- redundancy_receiver_log_message.begin();
- while (it != redundancy_receiver_log_message.end() &&
- *remaining_space >=
- kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize) {
- receiver_log_message->push_front(*it);
- size_t num_event_logs = (*remaining_space - kRtcpReceiverFrameLogSize) /
- kRtcpReceiverEventLogSize;
- RtcpReceiverEventLogMessages& event_log_messages =
- receiver_log_message->front().event_log_messages_;
- if (num_event_logs < event_log_messages.size())
- event_log_messages.resize(num_event_logs);
-
- *remaining_space -= kRtcpReceiverFrameLogSize +
- event_log_messages.size() * kRtcpReceiverEventLogSize;
- ++number_of_frames;
- *total_number_of_messages_to_send += event_log_messages.size();
- ++it;
- }
-}
-
// A class to build a string representing the NACK list in Cast message.
//
// The string will look like "23:3-6 25:1,5-6", meaning packets 3 to 6 in frame
@@ -197,7 +168,7 @@ PacketRef RtcpBuilder::BuildRtcpFromReceiver(
const RtcpReportBlock* report_block,
const RtcpReceiverReferenceTimeReport* rrtr,
const RtcpCastMessage* cast_message,
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
base::TimeDelta target_delay) {
Start();
@@ -366,7 +337,7 @@ void RtcpBuilder::AddDlrrRb(const RtcpDlrrReportBlock& dlrr) {
}
void RtcpBuilder::AddReceiverLog(
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events) {
+ const ReceiverRtcpEventSubscriber::RtcpEvents& rtcp_events) {
size_t total_number_of_messages_to_send = 0;
RtcpReceiverLogMessage receiver_log_message;
@@ -441,12 +412,11 @@ void RtcpBuilder::AddReceiverLog(
}
bool RtcpBuilder::GetRtcpReceiverLogMessage(
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ const ReceiverRtcpEventSubscriber::RtcpEvents& rtcp_events,
RtcpReceiverLogMessage* receiver_log_message,
size_t* total_number_of_messages_to_send) {
size_t number_of_frames = 0;
- size_t remaining_space =
- std::min<size_t>(kMaxReceiverLogBytes, writer_.remaining());
+ size_t remaining_space = writer_.remaining();
if (remaining_space < kRtcpCastLogHeaderSize + kRtcpReceiverFrameLogSize +
kRtcpReceiverEventLogSize) {
return false;
@@ -459,7 +429,7 @@ bool RtcpBuilder::GetRtcpReceiverLogMessage(
// Account for the RTCP header for an application-defined packet.
remaining_space -= kRtcpCastLogHeaderSize;
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap::const_reverse_iterator rit =
+ ReceiverRtcpEventSubscriber::RtcpEvents::const_reverse_iterator rit =
rtcp_events.rbegin();
while (rit != rtcp_events.rend() &&
@@ -510,36 +480,6 @@ bool RtcpBuilder::GetRtcpReceiverLogMessage(
receiver_log_message->push_front(frame_log);
}
- rtcp_events_history_.push_front(*receiver_log_message);
-
- // We don't try to match RTP timestamps of redundancy frame logs with those
- // from the newest set (which would save the space of an extra RTP timestamp
- // over the wire). Unless the redundancy frame logs are very recent, it's
- // unlikely there will be a match anyway.
- if (rtcp_events_history_.size() > kFirstRedundancyOffset) {
- // Add first redundnacy messages, if enough space remaining
- AddReceiverLogEntries(rtcp_events_history_[kFirstRedundancyOffset],
- receiver_log_message,
- &remaining_space,
- &number_of_frames,
- total_number_of_messages_to_send);
- }
-
- if (rtcp_events_history_.size() > kSecondRedundancyOffset) {
- // Add second redundancy messages, if enough space remaining
- AddReceiverLogEntries(rtcp_events_history_[kSecondRedundancyOffset],
- receiver_log_message,
- &remaining_space,
- &number_of_frames,
- total_number_of_messages_to_send);
- }
-
- if (rtcp_events_history_.size() > kReceiveLogMessageHistorySize) {
- rtcp_events_history_.pop_back();
- }
-
- DCHECK_LE(rtcp_events_history_.size(), kReceiveLogMessageHistorySize);
-
VLOG(3) << "number of frames: " << number_of_frames;
VLOG(3) << "total messages to send: " << *total_number_of_messages_to_send;
return number_of_frames > 0;
diff --git a/chromium/media/cast/net/rtcp/rtcp_builder.h b/chromium/media/cast/net/rtcp/rtcp_builder.h
index b530648e4f3..2f22c3910fa 100644
--- a/chromium/media/cast/net/rtcp/rtcp_builder.h
+++ b/chromium/media/cast/net/rtcp/rtcp_builder.h
@@ -19,27 +19,6 @@
namespace media {
namespace cast {
-// We limit the size of receiver logs to avoid queuing up packets.
-const size_t kMaxReceiverLogBytes = 200;
-
-// The determines how long to hold receiver log events, based on how
-// many "receiver log message reports" ago the events were sent.
-const size_t kReceiveLogMessageHistorySize = 20;
-
-// This determines when to send events the second time.
-const size_t kFirstRedundancyOffset = 10;
-COMPILE_ASSERT(kFirstRedundancyOffset > 0 &&
- kFirstRedundancyOffset <= kReceiveLogMessageHistorySize,
- redundancy_offset_out_of_range);
-
-// When to send events the third time.
-const size_t kSecondRedundancyOffset = 20;
-COMPILE_ASSERT(kSecondRedundancyOffset >
- kFirstRedundancyOffset && kSecondRedundancyOffset <=
- kReceiveLogMessageHistorySize,
- redundancy_offset_out_of_range);
-
-
class RtcpBuilder {
public:
explicit RtcpBuilder(uint32 sending_ssrc);
@@ -49,7 +28,7 @@ class RtcpBuilder {
const RtcpReportBlock* report_block,
const RtcpReceiverReferenceTimeReport* rrtr,
const RtcpCastMessage* cast_message,
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
+ const ReceiverRtcpEventSubscriber::RtcpEvents* rtcp_events,
base::TimeDelta target_delay);
PacketRef BuildRtcpFromSender(const RtcpSenderInfo& sender_info);
@@ -65,10 +44,10 @@ class RtcpBuilder {
void AddSR(const RtcpSenderInfo& sender_info);
void AddDlrrRb(const RtcpDlrrReportBlock& dlrr);
void AddReceiverLog(
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events);
+ const ReceiverRtcpEventSubscriber::RtcpEvents& rtcp_events);
bool GetRtcpReceiverLogMessage(
- const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ const ReceiverRtcpEventSubscriber::RtcpEvents& rtcp_events,
RtcpReceiverLogMessage* receiver_log_message,
size_t* total_number_of_messages_to_send);
@@ -79,7 +58,6 @@ class RtcpBuilder {
const uint32 ssrc_;
char* ptr_of_length_;
PacketRef packet_;
- std::deque<RtcpReceiverLogMessage> rtcp_events_history_;
DISALLOW_COPY_AND_ASSIGN(RtcpBuilder);
};
diff --git a/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc b/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
index e68444aef32..6bd9d058122 100644
--- a/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
@@ -47,12 +47,16 @@ class RtcpBuilderTest : public ::testing::Test {
void ExpectPacketEQ(scoped_ptr<Packet> golden_packet,
PacketRef packet) {
+ int diffs = 0;
EXPECT_EQ(golden_packet->size(), packet->data.size());
if (golden_packet->size() == packet->data.size()) {
for (size_t x = 0; x < golden_packet->size(); x++) {
- EXPECT_EQ((*golden_packet)[x], packet->data[x]);
- if ((*golden_packet)[x] != packet->data[x])
- break;
+ EXPECT_EQ((*golden_packet)[x], packet->data[x]) <<
+ "x = " << x << " / " << golden_packet->size();
+ if ((*golden_packet)[x] != packet->data[x]) {
+ if (++diffs > 5)
+ break;
+ }
}
}
}
@@ -191,7 +195,7 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
missing_packets;
ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
ExpectPacketEQ(p.GetPacket().Pass(),
rtcp_builder_->BuildRtcpFromReceiver(
@@ -224,7 +228,7 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
packet_event.timestamp = testing_clock.NowTicks();
packet_event.packet_id = kLostPacketId1;
event_subscriber.OnReceivePacketEvent(packet_event);
- event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ event_subscriber.GetRtcpEventsWithRedundancy(&rtcp_events);
EXPECT_EQ(2u, rtcp_events.size());
ExpectPacketEQ(
@@ -252,31 +256,22 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithOversizedFrameLog) {
p.AddReceiverLog(kSendingSsrc);
- int remaining_bytes = kMaxReceiverLogBytes;
- remaining_bytes -= kRtcpCastLogHeaderSize;
+ int num_events = kMaxEventsPerRTCP;
- remaining_bytes -= kRtcpReceiverFrameLogSize;
- int num_events = remaining_bytes / kRtcpReceiverEventLogSize;
EXPECT_LE(num_events, static_cast<int>(kRtcpMaxReceiverLogMessages));
- // Only the last |num_events| events are sent due to receiver log size cap.
p.AddReceiverFrameLog(
kRtpTimestamp + 2345,
num_events,
- kTimeBaseMs + (kRtcpMaxReceiverLogMessages - num_events) * kTimeDelayMs);
+ kTimeBaseMs);
for (int i = 0; i < num_events; i++) {
p.AddReceiverEventLog(
- kLostPacketId1, PACKET_RECEIVED,
+ kLostPacketId1,
+ PACKET_RECEIVED,
static_cast<uint16>(kTimeDelayMs * i));
}
ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
- FrameEvent frame_event;
- frame_event.rtp_timestamp = kRtpTimestamp;
- frame_event.type = FRAME_ACK_SENT;
- frame_event.media_type = VIDEO_EVENT;
- frame_event.timestamp = testing_clock.NowTicks();
- event_subscriber.OnReceiveFrameEvent(frame_event);
for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
PacketEvent packet_event;
@@ -289,8 +284,8 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithOversizedFrameLog) {
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
}
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber.GetRtcpEventsWithRedundancy(&rtcp_events);
ExpectPacketEQ(p.GetPacket().Pass(),
rtcp_builder_->BuildRtcpFromReceiver(
@@ -316,16 +311,9 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithTooManyLogFrames) {
p.AddReceiverLog(kSendingSsrc);
- int remaining_bytes = kMaxReceiverLogBytes;
- remaining_bytes -= kRtcpCastLogHeaderSize;
-
- int num_events =
- remaining_bytes / (kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize);
+ int num_events = kMaxEventsPerRTCP;
- // The last |num_events| events are sent due to receiver log size cap.
- for (size_t i = kRtcpMaxReceiverLogMessages - num_events;
- i < kRtcpMaxReceiverLogMessages;
- ++i) {
+ for (int i = 0; i < num_events; i++) {
p.AddReceiverFrameLog(kRtpTimestamp + i, 1, kTimeBaseMs + i * kTimeDelayMs);
p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
}
@@ -342,8 +330,8 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithTooManyLogFrames) {
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
}
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber.GetRtcpEventsWithRedundancy(&rtcp_events);
ExpectPacketEQ(p.GetPacket().Pass(),
rtcp_builder_->BuildRtcpFromReceiver(
@@ -389,8 +377,8 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportWithOldLogFrames) {
base::TimeDelta::FromMilliseconds(kTimeBetweenEventsMs));
}
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber.GetRtcpEventsWithRedundancy(&rtcp_events);
ExpectPacketEQ(p.GetPacket().Pass(),
rtcp_builder_->BuildRtcpFromReceiver(
@@ -411,7 +399,7 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportRedundancy) {
testing_clock.Advance(base::TimeDelta::FromMilliseconds(time_base_ms));
ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
- size_t packet_count = kReceiveLogMessageHistorySize + 10;
+ size_t packet_count = kNumResends * kResendDelay + 10;
for (size_t i = 0; i < packet_count; i++) {
TestRtcpPacketBuilder p;
p.AddRr(kSendingSsrc, 1);
@@ -419,22 +407,15 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportRedundancy) {
p.AddReceiverLog(kSendingSsrc);
- if (i >= kSecondRedundancyOffset) {
- p.AddReceiverFrameLog(
- kRtpTimestamp,
- 1,
- time_base_ms - kSecondRedundancyOffset * kTimeBetweenEventsMs);
- p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ int num_events = (i + kResendDelay) / kResendDelay;
+ num_events = std::min<int>(num_events, kNumResends);
+ p.AddReceiverFrameLog(kRtpTimestamp, num_events,
+ time_base_ms - (num_events - 1) * kResendDelay *
+ kTimeBetweenEventsMs);
+ for (int i = 0; i < num_events; i++) {
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT,
+ i * kResendDelay * kTimeBetweenEventsMs);
}
- if (i >= kFirstRedundancyOffset) {
- p.AddReceiverFrameLog(
- kRtpTimestamp,
- 1,
- time_base_ms - kFirstRedundancyOffset * kTimeBetweenEventsMs);
- p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
- }
- p.AddReceiverFrameLog(kRtpTimestamp, 1, time_base_ms);
- p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
FrameEvent frame_event;
frame_event.rtp_timestamp = kRtpTimestamp;
@@ -443,8 +424,8 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportRedundancy) {
frame_event.timestamp = testing_clock.NowTicks();
event_subscriber.OnReceiveFrameEvent(frame_event);
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber.GetRtcpEventsWithRedundancy(&rtcp_events);
ExpectPacketEQ(p.GetPacket().Pass(),
rtcp_builder_->BuildRtcpFromReceiver(
diff --git a/chromium/media/cast/net/rtcp/rtcp_defines.cc b/chromium/media/cast/net/rtcp/rtcp_defines.cc
index a296dc8e480..f0ceef30789 100644
--- a/chromium/media/cast/net/rtcp/rtcp_defines.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_defines.cc
@@ -33,5 +33,17 @@ RtcpReceiverReferenceTimeReport::~RtcpReceiverReferenceTimeReport() {}
RtcpEvent::RtcpEvent() : type(UNKNOWN), packet_id(0u) {}
RtcpEvent::~RtcpEvent() {}
+RtpReceiverStatistics::RtpReceiverStatistics() :
+ fraction_lost(0),
+ cumulative_lost(0),
+ extended_high_sequence_number(0),
+ jitter(0) {}
+
+SendRtcpFromRtpReceiver_Params::SendRtcpFromRtpReceiver_Params()
+ : ssrc(0),
+ sender_ssrc(0) {}
+
+SendRtcpFromRtpReceiver_Params::~SendRtcpFromRtpReceiver_Params() {}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/net/rtcp/rtcp_defines.h b/chromium/media/cast/net/rtcp/rtcp_defines.h
index 3dd23ef6a4c..b35e14bfe63 100644
--- a/chromium/media/cast/net/rtcp/rtcp_defines.h
+++ b/chromium/media/cast/net/rtcp/rtcp_defines.h
@@ -109,6 +109,37 @@ typedef base::Callback<void(base::TimeDelta)> RtcpRttCallback;
typedef
base::Callback<void(const RtcpReceiverLogMessage&)> RtcpLogMessageCallback;
+// TODO(hubbe): Document members of this struct.
+struct RtpReceiverStatistics {
+ RtpReceiverStatistics();
+ uint8 fraction_lost;
+ uint32 cumulative_lost; // 24 bits valid.
+ uint32 extended_high_sequence_number;
+ uint32 jitter;
+};
+
+// These are intended to only be created using Rtcp::ConvertToNTPAndSave.
+struct RtcpTimeData {
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ base::TimeTicks timestamp;
+};
+
+// This struct is used to encapsulate all the parameters of the
+// SendRtcpFromRtpReceiver for IPC transportation.
+struct SendRtcpFromRtpReceiver_Params {
+ SendRtcpFromRtpReceiver_Params();
+ ~SendRtcpFromRtpReceiver_Params();
+ uint32 ssrc;
+ uint32 sender_ssrc;
+ RtcpTimeData time_data;
+ scoped_ptr<RtcpCastMessage> cast_message;
+ base::TimeDelta target_delay;
+ scoped_ptr<std::vector<std::pair<RtpTimestamp, RtcpEvent> > > rtcp_events;
+ scoped_ptr<RtpReceiverStatistics> rtp_receiver_statistics;
+};
+
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/net/rtcp/rtcp_unittest.cc b/chromium/media/cast/net/rtcp/rtcp_unittest.cc
index 707cd959716..07963518bd0 100644
--- a/chromium/media/cast/net/rtcp/rtcp_unittest.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <stdint.h>
+#include <vector>
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
@@ -25,55 +26,60 @@ class FakeRtcpTransport : public PacedPacketSender {
public:
explicit FakeRtcpTransport(base::SimpleTestTickClock* clock)
: clock_(clock),
- packet_delay_(base::TimeDelta::FromMilliseconds(42)) {}
+ packet_delay_(base::TimeDelta::FromMilliseconds(42)),
+ paused_(false) {}
void set_rtcp_destination(Rtcp* rtcp) { rtcp_ = rtcp; }
base::TimeDelta packet_delay() const { return packet_delay_; }
void set_packet_delay(base::TimeDelta delay) { packet_delay_ = delay; }
- bool SendRtcpPacket(uint32 ssrc, PacketRef packet) override {
+ bool SendRtcpPacket(uint32 ssrc, PacketRef packet) final {
clock_->Advance(packet_delay_);
- rtcp_->IncomingRtcpPacket(&packet->data[0], packet->data.size());
+ if (paused_) {
+ packet_queue_.push_back(packet);
+ } else {
+ rtcp_->IncomingRtcpPacket(&packet->data[0], packet->data.size());
+ }
return true;
}
- bool SendPackets(const SendPacketVector& packets) override { return false; }
+ bool SendPackets(const SendPacketVector& packets) final { return false; }
bool ResendPackets(const SendPacketVector& packets,
- const DedupInfo& dedup_info) override {
+ const DedupInfo& dedup_info) final {
return false;
}
- void CancelSendingPacket(const PacketKey& packet_key) override {}
+ void CancelSendingPacket(const PacketKey& packet_key) final {}
+
+ void Pause() {
+ paused_ = true;
+ }
+
+ void Unpause() {
+ paused_ = false;
+ for (size_t i = 0; i < packet_queue_.size(); ++i) {
+ rtcp_->IncomingRtcpPacket(&packet_queue_[i]->data[0],
+ packet_queue_[i]->data.size());
+ }
+ packet_queue_.clear();
+ }
+
+ void ReversePacketQueue() {
+ std::reverse(packet_queue_.begin(), packet_queue_.end());
+ }
private:
base::SimpleTestTickClock* const clock_;
base::TimeDelta packet_delay_;
Rtcp* rtcp_;
+ bool paused_;
+ std::vector<PacketRef> packet_queue_;
DISALLOW_COPY_AND_ASSIGN(FakeRtcpTransport);
};
-class FakeReceiverStats : public RtpReceiverStatistics {
- public:
- FakeReceiverStats() {}
- ~FakeReceiverStats() override {}
-
- void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost,
- uint32* extended_high_sequence_number,
- uint32* jitter) override {
- *fraction_lost = 0;
- *cumulative_lost = 0;
- *extended_high_sequence_number = 0;
- *jitter = 0;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FakeReceiverStats);
-};
-
class MockFrameSender {
public:
MockFrameSender() {}
@@ -128,7 +134,6 @@ class RtcpTest : public ::testing::Test {
MockFrameSender mock_frame_sender_;
Rtcp rtcp_for_sender_;
Rtcp rtcp_for_receiver_;
- FakeReceiverStats stats_;
DISALLOW_COPY_AND_ASSIGN(RtcpTest);
};
@@ -198,8 +203,10 @@ TEST_F(RtcpTest, RoundTripTimesDeterminedFromReportPingPong) {
#endif
// Receiver --> Sender
+ RtpReceiverStatistics stats;
rtcp_for_receiver_.SendRtcpFromRtpReceiver(
- NULL, base::TimeDelta(), NULL, &stats_);
+ rtcp_for_receiver_.ConvertToNTPAndSave(receiver_clock_->NowTicks()),
+ NULL, base::TimeDelta(), NULL, &stats);
expected_rtt_according_to_sender = one_way_trip_time * 2;
EXPECT_EQ(expected_rtt_according_to_sender,
rtcp_for_sender_.current_round_trip_time());
@@ -216,6 +223,47 @@ TEST_F(RtcpTest, RoundTripTimesDeterminedFromReportPingPong) {
}
}
+TEST_F(RtcpTest, RejectOldRtcpPacket) {
+ EXPECT_CALL(mock_frame_sender_, OnReceivedCastFeedback(_))
+ .Times(1);
+
+ // This is rejected.
+ RtcpCastMessage cast_message(kSenderSsrc);
+ cast_message.ack_frame_id = 1;
+ receiver_to_sender_.Pause();
+ rtcp_for_receiver_.SendRtcpFromRtpReceiver(
+ rtcp_for_receiver_.ConvertToNTPAndSave(
+ receiver_clock_->NowTicks() - base::TimeDelta::FromSeconds(10)),
+ &cast_message, base::TimeDelta(), NULL, NULL);
+
+ cast_message.ack_frame_id = 2;
+ rtcp_for_receiver_.SendRtcpFromRtpReceiver(
+ rtcp_for_receiver_.ConvertToNTPAndSave(receiver_clock_->NowTicks()),
+ &cast_message, base::TimeDelta(), NULL, NULL);
+
+ receiver_to_sender_.ReversePacketQueue();
+ receiver_to_sender_.Unpause();
+}
+
+TEST_F(RtcpTest, NegativeTimeTicks) {
+ EXPECT_CALL(mock_frame_sender_, OnReceivedCastFeedback(_))
+ .Times(2);
+
+ // Send a RRTR with NTP timestamp that translates to a very negative
+ // value for TimeTicks.
+ RtcpCastMessage cast_message(kSenderSsrc);
+ cast_message.ack_frame_id = 2;
+ rtcp_for_receiver_.SendRtcpFromRtpReceiver(
+ rtcp_for_receiver_.ConvertToNTPAndSave(
+ base::TimeTicks() - base::TimeDelta::FromSeconds(5)),
+ &cast_message, base::TimeDelta(), NULL, NULL);
+
+ cast_message.ack_frame_id = 1;
+ rtcp_for_receiver_.SendRtcpFromRtpReceiver(
+ rtcp_for_receiver_.ConvertToNTPAndSave(base::TimeTicks()),
+ &cast_message, base::TimeDelta(), NULL, NULL);
+}
+
// TODO(miu): Find a better home for this test.
TEST(MisplacedCastTest, NtpAndTime) {
const int64 kSecondsbetweenYear1900and2010 = INT64_C(40176 * 24 * 60 * 60);
diff --git a/chromium/media/cast/net/rtp/cast_message_builder_unittest.cc b/chromium/media/cast/net/rtp/cast_message_builder_unittest.cc
index 240d04a7ac4..bae827a5a8c 100644
--- a/chromium/media/cast/net/rtp/cast_message_builder_unittest.cc
+++ b/chromium/media/cast/net/rtp/cast_message_builder_unittest.cc
@@ -28,7 +28,7 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
NackFeedbackVerification()
: triggered_(false), missing_packets_(), last_frame_acked_(0) {}
- void CastFeedback(const RtcpCastMessage& cast_feedback) override {
+ void CastFeedback(const RtcpCastMessage& cast_feedback) final {
EXPECT_EQ(kSsrc, cast_feedback.media_ssrc);
last_frame_acked_ = cast_feedback.ack_frame_id;
diff --git a/chromium/media/cast/net/rtp/receiver_stats.cc b/chromium/media/cast/net/rtp/receiver_stats.cc
index 416cdd8ea51..2dbdbcb9bba 100644
--- a/chromium/media/cast/net/rtp/receiver_stats.cc
+++ b/chromium/media/cast/net/rtp/receiver_stats.cc
@@ -22,15 +22,11 @@ ReceiverStats::ReceiverStats(base::TickClock* clock)
interval_number_packets_(0),
interval_wrap_count_(0) {}
-ReceiverStats::~ReceiverStats() {}
-
-void ReceiverStats::GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost,
- uint32* extended_high_sequence_number,
- uint32* jitter) {
+RtpReceiverStatistics ReceiverStats::GetStatistics() {
+ RtpReceiverStatistics ret;
// Compute losses.
if (interval_number_packets_ == 0) {
- *fraction_lost = 0;
+ ret.fraction_lost = 0;
} else {
int diff = 0;
if (interval_wrap_count_ == 0) {
@@ -42,36 +38,38 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
}
if (diff < 1) {
- *fraction_lost = 0;
+ ret.fraction_lost = 0;
} else {
float tmp_ratio =
(1 - static_cast<float>(interval_number_packets_) / abs(diff));
- *fraction_lost = static_cast<uint8>(256 * tmp_ratio);
+ ret.fraction_lost = static_cast<uint8>(256 * tmp_ratio);
}
}
int expected_packets_num = max_sequence_number_ - min_sequence_number_ + 1;
if (total_number_packets_ == 0) {
- *cumulative_lost = 0;
+ ret.cumulative_lost = 0;
} else if (sequence_number_cycles_ == 0) {
- *cumulative_lost = expected_packets_num - total_number_packets_;
+ ret.cumulative_lost = expected_packets_num - total_number_packets_;
} else {
- *cumulative_lost =
+ ret.cumulative_lost =
kMaxSequenceNumber * (sequence_number_cycles_ - 1) +
(expected_packets_num - total_number_packets_ + kMaxSequenceNumber);
}
// Extended high sequence number consists of the highest seq number and the
// number of cycles (wrap).
- *extended_high_sequence_number =
+ ret.extended_high_sequence_number =
(sequence_number_cycles_ << 16) + max_sequence_number_;
- *jitter = static_cast<uint32>(std::abs(jitter_.InMillisecondsRoundedUp()));
+ ret.jitter = static_cast<uint32>(std::abs(jitter_.InMillisecondsRoundedUp()));
// Reset interval values.
interval_min_sequence_number_ = 0;
interval_number_packets_ = 0;
interval_wrap_count_ = 0;
+
+ return ret;
}
void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
diff --git a/chromium/media/cast/net/rtp/receiver_stats.h b/chromium/media/cast/net/rtp/receiver_stats.h
index d428403dff9..9cdda97918e 100644
--- a/chromium/media/cast/net/rtp/receiver_stats.h
+++ b/chromium/media/cast/net/rtp/receiver_stats.h
@@ -13,15 +13,11 @@
namespace media {
namespace cast {
-class ReceiverStats : public RtpReceiverStatistics {
+class ReceiverStats {
public:
explicit ReceiverStats(base::TickClock* clock);
- ~ReceiverStats() override;
- void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter) override;
+ RtpReceiverStatistics GetStatistics();
void UpdateStatistics(const RtpCastHeader& header);
private:
diff --git a/chromium/media/cast/net/rtp/receiver_stats_unittest.cc b/chromium/media/cast/net/rtp/receiver_stats_unittest.cc
index 7bb3dd13769..4bde4a30c7e 100644
--- a/chromium/media/cast/net/rtp/receiver_stats_unittest.cc
+++ b/chromium/media/cast/net/rtp/receiver_stats_unittest.cc
@@ -20,11 +20,7 @@ static const uint32 kStdTimeIncrementMs = 33;
class ReceiverStatsTest : public ::testing::Test {
protected:
ReceiverStatsTest()
- : stats_(&testing_clock_),
- fraction_lost_(0),
- cumulative_lost_(0),
- extended_high_sequence_number_(0),
- jitter_(0) {
+ : stats_(&testing_clock_) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
start_time_ = testing_clock_.NowTicks();
@@ -45,10 +41,6 @@ class ReceiverStatsTest : public ::testing::Test {
ReceiverStats stats_;
RtpCastHeader rtp_header_;
- uint8 fraction_lost_;
- uint32 cumulative_lost_;
- uint32 extended_high_sequence_number_;
- uint32 jitter_;
base::SimpleTestTickClock testing_clock_;
base::TimeTicks start_time_;
base::TimeDelta delta_increments_;
@@ -57,14 +49,11 @@ class ReceiverStatsTest : public ::testing::Test {
};
TEST_F(ReceiverStatsTest, ResetState) {
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_EQ(0u, fraction_lost_);
- EXPECT_EQ(0u, cumulative_lost_);
- EXPECT_EQ(0u, extended_high_sequence_number_);
- EXPECT_EQ(0u, jitter_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_EQ(0u, s.fraction_lost);
+ EXPECT_EQ(0u, s.cumulative_lost);
+ EXPECT_EQ(0u, s.extended_high_sequence_number);
+ EXPECT_EQ(0u, s.jitter);
}
TEST_F(ReceiverStatsTest, LossCount) {
@@ -77,15 +66,12 @@ TEST_F(ReceiverStatsTest, LossCount) {
++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_EQ(63u, fraction_lost_);
- EXPECT_EQ(74u, cumulative_lost_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_EQ(63u, s.fraction_lost);
+ EXPECT_EQ(74u, s.cumulative_lost);
// Build extended sequence number.
const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
- EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(extended_seq_num, s.extended_high_sequence_number);
}
TEST_F(ReceiverStatsTest, NoLossWrap) {
@@ -98,15 +84,12 @@ TEST_F(ReceiverStatsTest, NoLossWrap) {
++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_EQ(0u, fraction_lost_);
- EXPECT_EQ(0u, cumulative_lost_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_EQ(0u, s.fraction_lost);
+ EXPECT_EQ(0u, s.cumulative_lost);
// Build extended sequence number (one wrap cycle).
const uint32 extended_seq_num = (1 << 16) + rtp_header_.sequence_number - 1;
- EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(extended_seq_num, s.extended_high_sequence_number);
}
TEST_F(ReceiverStatsTest, LossCountWrap) {
@@ -121,15 +104,12 @@ TEST_F(ReceiverStatsTest, LossCountWrap) {
++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_EQ(63u, fraction_lost_);
- EXPECT_EQ(74u, cumulative_lost_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_EQ(63u, s.fraction_lost);
+ EXPECT_EQ(74u, s.cumulative_lost);
// Build extended sequence number (one wrap cycle).
const uint32 extended_seq_num = (1 << 16) + rtp_header_.sequence_number - 1;
- EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(extended_seq_num, s.extended_high_sequence_number);
}
TEST_F(ReceiverStatsTest, BasicJitter) {
@@ -139,16 +119,13 @@ TEST_F(ReceiverStatsTest, BasicJitter) {
rtp_header_.rtp_timestamp += 33 * 90;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_FALSE(fraction_lost_);
- EXPECT_FALSE(cumulative_lost_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_FALSE(s.fraction_lost);
+ EXPECT_FALSE(s.cumulative_lost);
// Build extended sequence number (one wrap cycle).
const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
- EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
- EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), jitter_);
+ EXPECT_EQ(extended_seq_num, s.extended_high_sequence_number);
+ EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), s.jitter);
}
TEST_F(ReceiverStatsTest, NonTrivialJitter) {
@@ -161,17 +138,14 @@ TEST_F(ReceiverStatsTest, NonTrivialJitter) {
base::TimeDelta::FromMilliseconds(kAdditionalIncrement);
testing_clock_.Advance(delta_increments_ + additional_delta);
}
- stats_.GetStatistics(&fraction_lost_,
- &cumulative_lost_,
- &extended_high_sequence_number_,
- &jitter_);
- EXPECT_FALSE(fraction_lost_);
- EXPECT_FALSE(cumulative_lost_);
+ RtpReceiverStatistics s = stats_.GetStatistics();
+ EXPECT_FALSE(s.fraction_lost);
+ EXPECT_FALSE(s.cumulative_lost);
// Build extended sequence number (one wrap cycle).
const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
- EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(extended_seq_num, s.extended_high_sequence_number);
EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs + kAdditionalIncrement, 300),
- jitter_);
+ s.jitter);
}
} // namespace cast
diff --git a/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc b/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
index aa1b4c59b21..802a9c49d1f 100644
--- a/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
+++ b/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
@@ -61,7 +61,7 @@ class TestRtpPacketTransport : public PacketSender {
EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
}
- bool SendPacket(PacketRef packet, const base::Closure& cb) override {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final {
++packets_sent_;
RtpHeaderParser parser(&packet->data[0], packet->data.size());
RtpCastTestHeader rtp_header;
@@ -72,7 +72,7 @@ class TestRtpPacketTransport : public PacketSender {
return true;
}
- int64 GetBytesSent() override { return 0; }
+ int64 GetBytesSent() final { return 0; }
size_t number_of_packets_received() const { return packets_sent_; }
diff --git a/chromium/media/cast/net/rtp/rtp_parser.cc b/chromium/media/cast/net/rtp/rtp_parser.cc
index 1d68cd15a1c..bb6c2c0ebd8 100644
--- a/chromium/media/cast/net/rtp/rtp_parser.cc
+++ b/chromium/media/cast/net/rtp/rtp_parser.cc
@@ -12,6 +12,15 @@
namespace media {
namespace cast {
+// static
+bool RtpParser::ParseSsrc(const uint8* packet,
+ size_t length,
+ uint32* ssrc) {
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(packet), length);
+ return big_endian_reader.Skip(8) && big_endian_reader.ReadU32(ssrc);
+}
+
RtpParser::RtpParser(uint32 expected_sender_ssrc, uint8 expected_payload_type)
: expected_sender_ssrc_(expected_sender_ssrc),
expected_payload_type_(expected_payload_type) {}
diff --git a/chromium/media/cast/net/rtp/rtp_parser.h b/chromium/media/cast/net/rtp/rtp_parser.h
index 64586d27c97..821507cce80 100644
--- a/chromium/media/cast/net/rtp/rtp_parser.h
+++ b/chromium/media/cast/net/rtp/rtp_parser.h
@@ -33,6 +33,8 @@ class RtpParser {
const uint8** payload_data,
size_t* payload_size);
+ static bool ParseSsrc(const uint8* packet, size_t length, uint32* ssrc);
+
private:
const uint32 expected_sender_ssrc_;
const uint8 expected_payload_type_;
diff --git a/chromium/media/cast/net/rtp/rtp_parser_unittest.cc b/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
index 40822c8966f..1eca5567776 100644
--- a/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
+++ b/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
@@ -56,7 +56,7 @@ class RtpParserTest : public ::testing::Test {
EXPECT_EQ(cast_header_.reference_frame_id,
parsed_header.reference_frame_id);
- EXPECT_TRUE(!!payload);
+ EXPECT_TRUE(payload);
EXPECT_NE(static_cast<size_t>(-1), payload_size);
}
diff --git a/chromium/media/cast/net/rtp/rtp_sender.cc b/chromium/media/cast/net/rtp/rtp_sender.cc
index c0a75757116..2ecabcb44c7 100644
--- a/chromium/media/cast/net/rtp/rtp_sender.cc
+++ b/chromium/media/cast/net/rtp/rtp_sender.cc
@@ -7,8 +7,6 @@
#include "base/big_endian.h"
#include "base/logging.h"
#include "base/rand_util.h"
-#include "media/cast/net/cast_transport_defines.h"
-#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
@@ -27,11 +25,9 @@ PacketRef FastCopyPacket(const PacketRef& packet) {
} // namespace
RtpSender::RtpSender(
- base::TickClock* clock,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
PacedSender* const transport)
- : clock_(clock),
- transport_(transport),
+ : transport_(transport),
transport_task_runner_(transport_task_runner),
weak_factory_(this) {
// Randomly set sequence number start value.
diff --git a/chromium/media/cast/net/rtp/rtp_sender.h b/chromium/media/cast/net/rtp/rtp_sender.h
index a2a5c3871d2..675658109e3 100644
--- a/chromium/media/cast/net/rtp/rtp_sender.h
+++ b/chromium/media/cast/net/rtp/rtp_sender.h
@@ -12,7 +12,6 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
@@ -32,7 +31,6 @@ namespace cast {
class RtpSender {
public:
RtpSender(
- base::TickClock* clock,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
PacedSender* const transport);
@@ -69,7 +67,6 @@ class RtpSender {
private:
void UpdateSequenceNumber(Packet* packet);
- base::TickClock* clock_; // Not owned by this class.
RtpPacketizerConfig config_;
PacketStorage storage_;
scoped_ptr<RtpPacketizer> packetizer_;
diff --git a/chromium/media/cast/net/udp_transport.cc b/chromium/media/cast/net/udp_transport.cc
index 43ef8628408..064a9d90b23 100644
--- a/chromium/media/cast/net/udp_transport.cc
+++ b/chromium/media/cast/net/udp_transport.cc
@@ -9,8 +9,6 @@
#include "base/bind.h"
#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/rand_util.h"
#include "net/base/io_buffer.h"
@@ -65,20 +63,25 @@ UdpTransport::UdpTransport(
UdpTransport::~UdpTransport() {}
void UdpTransport::StartReceiving(
- const PacketReceiverCallback& packet_receiver) {
+ const PacketReceiverCallbackWithStatus& packet_receiver) {
DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
packet_receiver_ = packet_receiver;
- udp_socket_->AllowAddressReuse();
udp_socket_->SetMulticastLoopbackMode(true);
if (!IsEmpty(local_addr_)) {
- if (udp_socket_->Bind(local_addr_) < 0) {
+ if (udp_socket_->Open(local_addr_.GetFamily()) < 0 ||
+ udp_socket_->AllowAddressReuse() < 0 ||
+ udp_socket_->Bind(local_addr_) < 0) {
+ udp_socket_->Close();
status_callback_.Run(TRANSPORT_SOCKET_ERROR);
LOG(ERROR) << "Failed to bind local address.";
return;
}
} else if (!IsEmpty(remote_addr_)) {
- if (udp_socket_->Connect(remote_addr_) < 0) {
+ if (udp_socket_->Open(remote_addr_.GetFamily()) < 0 ||
+ udp_socket_->AllowAddressReuse() < 0 ||
+ udp_socket_->Connect(remote_addr_) < 0) {
+ udp_socket_->Close();
status_callback_.Run(TRANSPORT_SOCKET_ERROR);
LOG(ERROR) << "Failed to connect to remote address.";
return;
@@ -94,11 +97,24 @@ void UdpTransport::StartReceiving(
ScheduleReceiveNextPacket();
}
+void UdpTransport::StopReceiving() {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ packet_receiver_ = PacketReceiverCallbackWithStatus();
+}
+
+
void UdpTransport::SetDscp(net::DiffServCodePoint dscp) {
DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
next_dscp_value_ = dscp;
}
+#if defined(OS_WIN)
+void UdpTransport::UseNonBlockingIO() {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ udp_socket_->UseNonBlockingIO();
+}
+#endif
+
void UdpTransport::ScheduleReceiveNextPacket() {
DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
if (!packet_receiver_.is_null() && !receive_pending_) {
@@ -113,6 +129,9 @@ void UdpTransport::ScheduleReceiveNextPacket() {
void UdpTransport::ReceiveNextPacket(int length_or_status) {
DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ if (packet_receiver_.is_null())
+ return;
+
// Loop while UdpSocket is delivering data synchronously. When it responds
// with a "pending" status, break and expect this method to be called back in
// the future when a packet is ready.
@@ -150,15 +169,18 @@ void UdpTransport::ReceiveNextPacket(int length_or_status) {
remote_addr_ = recv_addr_;
VLOG(1) << "Setting remote address from first received packet: "
<< remote_addr_.ToString();
+ next_packet_->resize(length_or_status);
+ if (!packet_receiver_.Run(next_packet_.Pass())) {
+ VLOG(1) << "Packet was not valid, resetting remote address.";
+ remote_addr_ = net::IPEndPoint();
+ }
} else if (!IsEqual(remote_addr_, recv_addr_)) {
VLOG(1) << "Ignoring packet received from an unrecognized address: "
<< recv_addr_.ToString() << ".";
- length_or_status = net::ERR_IO_PENDING;
- continue;
+ } else {
+ next_packet_->resize(length_or_status);
+ packet_receiver_.Run(next_packet_.Pass());
}
-
- next_packet_->resize(length_or_status);
- packet_receiver_.Run(next_packet_.Pass());
length_or_status = net::ERR_IO_PENDING;
}
}
diff --git a/chromium/media/cast/net/udp_transport.h b/chromium/media/cast/net/udp_transport.h
index 0ee634680ff..ddeb8fcdd04 100644
--- a/chromium/media/cast/net/udp_transport.h
+++ b/chromium/media/cast/net/udp_transport.h
@@ -43,18 +43,24 @@ class UdpTransport : public PacketSender {
const net::IPEndPoint& remote_end_point,
int32 send_buffer_size,
const CastTransportStatusCallback& status_callback);
- ~UdpTransport() override;
+ ~UdpTransport() final;
// Start receiving packets. Packets are submitted to |packet_receiver|.
- void StartReceiving(const PacketReceiverCallback& packet_receiver);
+ void StartReceiving(const PacketReceiverCallbackWithStatus& packet_receiver);
+ void StopReceiving();
// Set a new DSCP value to the socket. The value will be set right before
// the next send.
void SetDscp(net::DiffServCodePoint dscp);
+#if defined(OS_WIN)
+ // Switch to use non-blocking IO. Must be called before StartReceiving().
+ void UseNonBlockingIO();
+#endif
+
// PacketSender implementations.
- bool SendPacket(PacketRef packet, const base::Closure& cb) override;
- int64 GetBytesSent() override;
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final;
+ int64 GetBytesSent() final;
private:
// Requests and processes packets from |udp_socket_|. This method is called
@@ -82,7 +88,7 @@ class UdpTransport : public PacketSender {
scoped_ptr<Packet> next_packet_;
scoped_refptr<net::WrappedIOBuffer> recv_buf_;
net::IPEndPoint recv_addr_;
- PacketReceiverCallback packet_receiver_;
+ PacketReceiverCallbackWithStatus packet_receiver_;
int32 send_buffer_size_;
const CastTransportStatusCallback status_callback_;
int bytes_sent_;
diff --git a/chromium/media/cast/net/udp_transport_unittest.cc b/chromium/media/cast/net/udp_transport_unittest.cc
index 2bc9bab58ad..00a219f8670 100644
--- a/chromium/media/cast/net/udp_transport_unittest.cc
+++ b/chromium/media/cast/net/udp_transport_unittest.cc
@@ -25,14 +25,15 @@ class MockPacketReceiver {
MockPacketReceiver(const base::Closure& callback)
: packet_callback_(callback) {}
- void ReceivedPacket(scoped_ptr<Packet> packet) {
+ bool ReceivedPacket(scoped_ptr<Packet> packet) {
packet_ = std::string(packet->size(), '\0');
std::copy(packet->begin(), packet->end(), packet_.begin());
packet_callback_.Run();
+ return true;
}
std::string packet() const { return packet_; }
- PacketReceiverCallback packet_receiver() {
+ PacketReceiverCallbackWithStatus packet_receiver() {
return base::Bind(&MockPacketReceiver::ReceivedPacket,
base::Unretained(this));
}
diff --git a/chromium/media/cast/receiver/audio_decoder.cc b/chromium/media/cast/receiver/audio_decoder.cc
index 5f4720e8fc5..f8a40be841c 100644
--- a/chromium/media/cast/receiver/audio_decoder.cc
+++ b/chromium/media/cast/receiver/audio_decoder.cc
@@ -8,7 +8,6 @@
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/memory/ref_counted.h"
#include "base/sys_byteorder.h"
#include "media/cast/cast_defines.h"
#include "third_party/opus/src/include/opus.h"
@@ -29,22 +28,22 @@ class AudioDecoder::ImplBase
: cast_environment_(cast_environment),
codec_(codec),
num_channels_(num_channels),
- cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ operational_status_(STATUS_UNINITIALIZED),
seen_first_frame_(false) {
if (num_channels_ <= 0 || sampling_rate <= 0 || sampling_rate % 100 != 0)
- cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
+ operational_status_ = STATUS_INVALID_CONFIGURATION;
}
- CastInitializationStatus InitializationResult() const {
- return cast_initialization_status_;
+ OperationalStatus InitializationResult() const {
+ return operational_status_;
}
void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
- DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
+ DCHECK_EQ(operational_status_, STATUS_INITIALIZED);
- COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
- size_of_frame_id_types_do_not_match);
+ static_assert(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
+ "size of frame_id types do not match");
bool is_continuous = true;
if (seen_first_frame_) {
const uint32 frames_ahead = encoded_frame->frame_id - last_frame_id_;
@@ -80,8 +79,8 @@ class AudioDecoder::ImplBase
const Codec codec_;
const int num_channels_;
- // Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
- CastInitializationStatus cast_initialization_status_;
+ // Subclass' ctor is expected to set this to STATUS_INITIALIZED.
+ OperationalStatus operational_status_;
private:
bool seen_first_frame_;
@@ -104,21 +103,20 @@ class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
max_samples_per_frame_(
kOpusMaxFrameDurationMillis * sampling_rate / 1000),
buffer_(new float[max_samples_per_frame_ * num_channels]) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
return;
if (opus_decoder_init(opus_decoder_, sampling_rate, num_channels) !=
OPUS_OK) {
- ImplBase::cast_initialization_status_ =
- STATUS_INVALID_AUDIO_CONFIGURATION;
+ ImplBase::operational_status_ = STATUS_INVALID_CONFIGURATION;
return;
}
- ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
}
private:
- ~OpusImpl() override {}
+ ~OpusImpl() final {}
- void RecoverBecauseFramesWereDropped() override {
+ void RecoverBecauseFramesWereDropped() final {
// Passing NULL for the input data notifies the decoder of frame loss.
const opus_int32 result =
opus_decode_float(
@@ -126,7 +124,7 @@ class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
DCHECK_GE(result, 0);
}
- scoped_ptr<AudioBus> Decode(uint8* data, int len) override {
+ scoped_ptr<AudioBus> Decode(uint8* data, int len) final {
scoped_ptr<AudioBus> audio_bus;
const opus_int32 num_samples_decoded = opus_decode_float(
opus_decoder_, data, len, buffer_.get(), max_samples_per_frame_, 0);
@@ -169,15 +167,15 @@ class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
CODEC_AUDIO_PCM16,
num_channels,
sampling_rate) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
return;
- ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
}
private:
- ~Pcm16Impl() override {}
+ ~Pcm16Impl() final {}
- scoped_ptr<AudioBus> Decode(uint8* data, int len) override {
+ scoped_ptr<AudioBus> Decode(uint8* data, int len) final {
scoped_ptr<AudioBus> audio_bus;
const int num_samples = len / sizeof(int16) / num_channels_;
if (num_samples <= 0)
@@ -219,10 +217,10 @@ AudioDecoder::AudioDecoder(
AudioDecoder::~AudioDecoder() {}
-CastInitializationStatus AudioDecoder::InitializationResult() const {
+OperationalStatus AudioDecoder::InitializationResult() const {
if (impl_.get())
return impl_->InitializationResult();
- return STATUS_UNSUPPORTED_AUDIO_CODEC;
+ return STATUS_UNSUPPORTED_CODEC;
}
void AudioDecoder::DecodeFrame(
@@ -230,8 +228,7 @@ void AudioDecoder::DecodeFrame(
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
- if (!impl_.get() ||
- impl_->InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ if (!impl_.get() || impl_->InitializationResult() != STATUS_INITIALIZED) {
callback.Run(make_scoped_ptr<AudioBus>(NULL), false);
return;
}
diff --git a/chromium/media/cast/receiver/audio_decoder.h b/chromium/media/cast/receiver/audio_decoder.h
index 0b13eae6a8f..a68b6fb9e57 100644
--- a/chromium/media/cast/receiver/audio_decoder.h
+++ b/chromium/media/cast/receiver/audio_decoder.h
@@ -32,10 +32,10 @@ class AudioDecoder {
Codec codec);
virtual ~AudioDecoder();
- // Returns STATUS_AUDIO_INITIALIZED if the decoder was successfully
- // constructed from the given FrameReceiverConfig. If this method returns any
- // other value, calls to DecodeFrame() will not succeed.
- CastInitializationStatus InitializationResult() const;
+ // Returns STATUS_INITIALIZED if the decoder was successfully constructed from
+ // the given FrameReceiverConfig. If this method returns any other value,
+ // calls to DecodeFrame() will not succeed.
+ OperationalStatus InitializationResult() const;
// Decode the payload in |encoded_frame| asynchronously. |callback| will be
// invoked on the CastEnvironment::MAIN thread with the result.
diff --git a/chromium/media/cast/receiver/audio_decoder_unittest.cc b/chromium/media/cast/receiver/audio_decoder_unittest.cc
index ac13af10bc8..2e6a1dc9292 100644
--- a/chromium/media/cast/receiver/audio_decoder_unittest.cc
+++ b/chromium/media/cast/receiver/audio_decoder_unittest.cc
@@ -41,12 +41,12 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
}
protected:
- void SetUp() override {
+ void SetUp() final {
audio_decoder_.reset(new AudioDecoder(cast_environment_,
GetParam().num_channels,
GetParam().sampling_rate,
GetParam().codec));
- CHECK_EQ(STATUS_AUDIO_INITIALIZED, audio_decoder_->InitializationResult());
+ CHECK_EQ(STATUS_INITIALIZED, audio_decoder_->InitializationResult());
audio_bus_factory_.reset(
new TestAudioBusFactory(GetParam().num_channels,
@@ -150,7 +150,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// A NULL |audio_bus| indicates a decode error, which we don't expect.
- ASSERT_FALSE(!audio_bus);
+ ASSERT_TRUE(audio_bus);
// Did the decoder detect whether frames were dropped?
EXPECT_EQ(should_be_continuous, is_continuous);
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.cc b/chromium/media/cast/receiver/cast_receiver_impl.cc
index 8265211c71c..5a01bb0890f 100644
--- a/chromium/media/cast/receiver/cast_receiver_impl.cc
+++ b/chromium/media/cast/receiver/cast_receiver_impl.cc
@@ -7,9 +7,9 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/trace_event/trace_event.h"
#include "media/cast/receiver/audio_decoder.h"
#include "media/cast/receiver/video_decoder.h"
@@ -20,42 +20,36 @@ scoped_ptr<CastReceiver> CastReceiver::Create(
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- PacketSender* const packet_sender) {
+ CastTransportSender* const transport) {
return scoped_ptr<CastReceiver>(new CastReceiverImpl(
- cast_environment, audio_config, video_config, packet_sender));
+ cast_environment, audio_config, video_config, transport));
}
CastReceiverImpl::CastReceiverImpl(
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- PacketSender* const packet_sender)
+ CastTransportSender* const transport)
: cast_environment_(cast_environment),
- pacer_(kTargetBurstSize,
- kMaxBurstSize,
- cast_environment->Clock(),
- cast_environment->Logging(),
- packet_sender,
- cast_environment->GetTaskRunner(CastEnvironment::MAIN)),
- audio_receiver_(cast_environment, audio_config, AUDIO_EVENT, &pacer_),
- video_receiver_(cast_environment, video_config, VIDEO_EVENT, &pacer_),
- ssrc_of_audio_sender_(audio_config.incoming_ssrc),
- ssrc_of_video_sender_(video_config.incoming_ssrc),
+ audio_receiver_(cast_environment, audio_config, AUDIO_EVENT, transport),
+ video_receiver_(cast_environment, video_config, VIDEO_EVENT, transport),
+ ssrc_of_audio_sender_(audio_config.sender_ssrc),
+ ssrc_of_video_sender_(video_config.sender_ssrc),
num_audio_channels_(audio_config.channels),
- audio_sampling_rate_(audio_config.frequency),
+ audio_sampling_rate_(audio_config.rtp_timebase),
audio_codec_(audio_config.codec),
video_codec_(video_config.codec) {}
CastReceiverImpl::~CastReceiverImpl() {}
-void CastReceiverImpl::DispatchReceivedPacket(scoped_ptr<Packet> packet) {
+void CastReceiverImpl::ReceivePacket(scoped_ptr<Packet> packet) {
const uint8_t* const data = &packet->front();
const size_t length = packet->size();
uint32 ssrc_of_sender;
if (Rtcp::IsRtcpPacket(data, length)) {
ssrc_of_sender = Rtcp::GetSsrcOfSender(data, length);
- } else if (!FrameReceiver::ParseSenderSsrc(data, length, &ssrc_of_sender)) {
+ } else if (!RtpParser::ParseSsrc(data, length, &ssrc_of_sender)) {
VLOG(1) << "Invalid RTP packet.";
return;
}
@@ -78,14 +72,6 @@ void CastReceiverImpl::DispatchReceivedPacket(scoped_ptr<Packet> packet) {
base::Passed(&packet)));
}
-PacketReceiverCallback CastReceiverImpl::packet_receiver() {
- return base::Bind(&CastReceiverImpl::DispatchReceivedPacket,
- // TODO(miu): This code structure is dangerous, since the
- // callback could be stored and then invoked after
- // destruction of |this|.
- base::Unretained(this));
-}
-
void CastReceiverImpl::RequestDecodedAudioFrame(
const AudioFrameDecodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.h b/chromium/media/cast/receiver/cast_receiver_impl.h
index 5431393d73d..6cd6ba3d7fd 100644
--- a/chromium/media/cast/receiver/cast_receiver_impl.h
+++ b/chromium/media/cast/receiver/cast_receiver_impl.h
@@ -27,26 +27,22 @@ class CastReceiverImpl : public CastReceiver {
CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- PacketSender* const packet_sender);
+ CastTransportSender* const transport);
- ~CastReceiverImpl() override;
+ ~CastReceiverImpl() final;
// CastReceiver implementation.
- PacketReceiverCallback packet_receiver() override;
+ void ReceivePacket(scoped_ptr<Packet> packet) final;
void RequestDecodedAudioFrame(
- const AudioFrameDecodedCallback& callback) override;
+ const AudioFrameDecodedCallback& callback) final;
void RequestEncodedAudioFrame(
- const ReceiveEncodedFrameCallback& callback) override;
+ const ReceiveEncodedFrameCallback& callback) final;
void RequestDecodedVideoFrame(
- const VideoFrameDecodedCallback& callback) override;
+ const VideoFrameDecodedCallback& callback) final;
void RequestEncodedVideoFrame(
- const ReceiveEncodedFrameCallback& callback) override;
+ const ReceiveEncodedFrameCallback& callback) final;
private:
- // Forwards |packet| to a specific RTP frame receiver, or drops it if SSRC
- // does not map to one of the receivers.
- void DispatchReceivedPacket(scoped_ptr<Packet> packet);
-
// Feeds an EncodedFrame into |audio_decoder_|. RequestDecodedAudioFrame()
// uses this as a callback for RequestEncodedAudioFrame().
void DecodeEncodedAudioFrame(
@@ -88,7 +84,6 @@ class CastReceiverImpl : public CastReceiver {
bool is_continuous);
const scoped_refptr<CastEnvironment> cast_environment_;
- PacedSender pacer_;
FrameReceiver audio_receiver_;
FrameReceiver video_receiver_;
diff --git a/chromium/media/cast/receiver/frame_receiver.cc b/chromium/media/cast/receiver/frame_receiver.cc
index 095e6115533..c0b15df8dec 100644
--- a/chromium/media/cast/receiver/frame_receiver.cc
+++ b/chromium/media/cast/receiver/frame_receiver.cc
@@ -23,36 +23,37 @@ FrameReceiver::FrameReceiver(
const scoped_refptr<CastEnvironment>& cast_environment,
const FrameReceiverConfig& config,
EventMediaType event_media_type,
- PacedPacketSender* const packet_sender)
+ CastTransportSender* const transport)
: cast_environment_(cast_environment),
- packet_parser_(config.incoming_ssrc, config.rtp_payload_type),
+ transport_(transport),
+ packet_parser_(config.sender_ssrc, config.rtp_payload_type),
stats_(cast_environment->Clock()),
event_media_type_(event_media_type),
event_subscriber_(kReceiverRtcpEventHistorySize, event_media_type),
- rtp_timebase_(config.frequency),
+ rtp_timebase_(config.rtp_timebase),
target_playout_delay_(
base::TimeDelta::FromMilliseconds(config.rtp_max_delay_ms)),
expected_frame_duration_(
- base::TimeDelta::FromSeconds(1) / config.max_frame_rate),
+ base::TimeDelta::FromSeconds(1) / config.target_frame_rate),
reports_are_scheduled_(false),
framer_(cast_environment->Clock(),
this,
- config.incoming_ssrc,
+ config.sender_ssrc,
true,
- config.rtp_max_delay_ms * config.max_frame_rate / 1000),
+ config.rtp_max_delay_ms * config.target_frame_rate / 1000),
rtcp_(RtcpCastMessageCallback(),
RtcpRttCallback(),
RtcpLogMessageCallback(),
cast_environment_->Clock(),
- packet_sender,
- config.feedback_ssrc,
- config.incoming_ssrc),
+ NULL,
+ config.receiver_ssrc,
+ config.sender_ssrc),
is_waiting_for_consecutive_frame_(false),
lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
- rtcp_interval_(base::TimeDelta::FromMilliseconds(config.rtcp_interval)),
weak_factory_(this) {
+ transport_->AddValidSsrc(config.sender_ssrc);
DCHECK_GT(config.rtp_max_delay_ms, 0);
- DCHECK_GT(config.max_frame_rate, 0);
+ DCHECK_GT(config.target_frame_rate, 0);
decryptor_.Initialize(config.aes_key, config.aes_iv_mask);
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
@@ -100,15 +101,6 @@ bool FrameReceiver::ProcessPacket(scoped_ptr<Packet> packet) {
return true;
}
-// static
-bool FrameReceiver::ParseSenderSsrc(const uint8* packet,
- size_t length,
- uint32* ssrc) {
- base::BigEndianReader big_endian_reader(
- reinterpret_cast<const char*>(packet), length);
- return big_endian_reader.Skip(8) && big_endian_reader.ReadU32(ssrc);
-}
-
void FrameReceiver::ProcessParsedPacket(const RtpCastHeader& rtp_header,
const uint8* payload_data,
size_t payload_size) {
@@ -177,10 +169,15 @@ void FrameReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
now, FRAME_ACK_SENT, event_media_type_,
rtp_timestamp, cast_message.ack_frame_id);
- ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
- rtcp_.SendRtcpFromRtpReceiver(&cast_message, target_playout_delay_,
- &rtcp_events, NULL);
+ ReceiverRtcpEventSubscriber::RtcpEvents rtcp_events;
+ event_subscriber_.GetRtcpEventsWithRedundancy(&rtcp_events);
+ transport_->SendRtcpFromRtpReceiver(rtcp_.GetLocalSsrc(),
+ rtcp_.GetRemoteSsrc(),
+ rtcp_.ConvertToNTPAndSave(now),
+ &cast_message,
+ target_playout_delay_,
+ &rtcp_events,
+ NULL);
}
void FrameReceiver::EmitAvailableEncodedFrames() {
@@ -322,21 +319,25 @@ void FrameReceiver::SendNextCastMessage() {
void FrameReceiver::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next = rtcp_interval_;
- time_to_next = std::max(
- time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN,
- FROM_HERE,
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&FrameReceiver::SendNextRtcpReport,
weak_factory_.GetWeakPtr()),
- time_to_next);
+ base::TimeDelta::FromMilliseconds(kDefaultRtcpIntervalMs));
}
void FrameReceiver::SendNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtcp_.SendRtcpFromRtpReceiver(NULL, base::TimeDelta(), NULL, &stats_);
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ RtpReceiverStatistics stats = stats_.GetStatistics();
+ transport_->SendRtcpFromRtpReceiver(rtcp_.GetLocalSsrc(),
+ rtcp_.GetRemoteSsrc(),
+ rtcp_.ConvertToNTPAndSave(now),
+ NULL,
+ base::TimeDelta(),
+ NULL,
+ &stats);
ScheduleNextRtcpReport();
}
diff --git a/chromium/media/cast/receiver/frame_receiver.h b/chromium/media/cast/receiver/frame_receiver.h
index 4d673f34a7e..f4037dd9a11 100644
--- a/chromium/media/cast/receiver/frame_receiver.h
+++ b/chromium/media/cast/receiver/frame_receiver.h
@@ -50,9 +50,9 @@ class FrameReceiver : public RtpPayloadFeedback,
FrameReceiver(const scoped_refptr<CastEnvironment>& cast_environment,
const FrameReceiverConfig& config,
EventMediaType event_media_type,
- PacedPacketSender* const packet_sender);
+ CastTransportSender* const transport);
- ~FrameReceiver() override;
+ ~FrameReceiver() final;
// Request an encoded frame.
//
@@ -64,10 +64,6 @@ class FrameReceiver : public RtpPayloadFeedback,
// out-of-order. Returns true if the parsing of the packet succeeded.
bool ProcessPacket(scoped_ptr<Packet> packet);
- // TODO(miu): This is the wrong place for this, but the (de)serialization
- // implementation needs to be consolidated first.
- static bool ParseSenderSsrc(const uint8* packet, size_t length, uint32* ssrc);
-
protected:
friend class FrameReceiverTest; // Invokes ProcessParsedPacket().
@@ -76,7 +72,7 @@ class FrameReceiver : public RtpPayloadFeedback,
size_t payload_size);
// RtpPayloadFeedback implementation.
- void CastFeedback(const RtcpCastMessage& cast_message) override;
+ void CastFeedback(const RtcpCastMessage& cast_message) final;
private:
// Processes ready-to-consume packets from |framer_|, decrypting each packet's
@@ -115,6 +111,9 @@ class FrameReceiver : public RtpPayloadFeedback,
const scoped_refptr<CastEnvironment> cast_environment_;
+ // Transport used to send data back.
+ CastTransportSender* const transport_;
+
// Deserializes a packet into a RtpHeader + payload bytes.
RtpParser packet_parser_;
@@ -179,9 +178,6 @@ class FrameReceiver : public RtpPayloadFeedback,
base::TimeTicks lip_sync_reference_time_;
ClockDriftSmoother lip_sync_drift_;
- // Time interval for sending a RTCP report.
- const base::TimeDelta rtcp_interval_;
-
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<FrameReceiver> weak_factory_;
diff --git a/chromium/media/cast/receiver/frame_receiver_unittest.cc b/chromium/media/cast/receiver/frame_receiver_unittest.cc
index 24e1a0b9eeb..a92b93a73cd 100644
--- a/chromium/media/cast/receiver/frame_receiver_unittest.cc
+++ b/chromium/media/cast/receiver/frame_receiver_unittest.cc
@@ -12,7 +12,8 @@
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/simple_event_subscriber.h"
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/net/mock_cast_transport_sender.h"
#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/receiver/frame_receiver.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
@@ -43,7 +44,7 @@ class FakeFrameClient {
void DeliverEncodedFrame(scoped_ptr<EncodedFrame> frame) {
SCOPED_TRACE(::testing::Message() << "num_called_ is " << num_called_);
- ASSERT_FALSE(!frame)
+ ASSERT_TRUE(frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
ASSERT_FALSE(expected_results_.empty());
EXPECT_EQ(expected_results_.front().first, frame->frame_id);
@@ -79,7 +80,7 @@ class FrameReceiverTest : public ::testing::Test {
~FrameReceiverTest() override {}
- void SetUp() override {
+ void SetUp() final {
payload_.assign(kPacketSize, 0);
// Always start with a key frame.
@@ -104,7 +105,7 @@ class FrameReceiverTest : public ::testing::Test {
config_.rtp_max_delay_ms = kPlayoutDelayMillis;
// Note: Frame rate must divide 1000 without remainder so the test code
// doesn't have to account for rounding errors.
- config_.max_frame_rate = 25;
+ config_.target_frame_rate = 25;
receiver_.reset(new FrameReceiver(
cast_environment_, config_, VIDEO_EVENT, &mock_transport_));
@@ -119,13 +120,13 @@ class FrameReceiverTest : public ::testing::Test {
void FeedLipSyncInfoIntoReceiver() {
const base::TimeTicks now = testing_clock_->NowTicks();
const int64 rtp_timestamp = (now - start_time_) *
- config_.frequency / base::TimeDelta::FromSeconds(1);
+ config_.rtp_timebase / base::TimeDelta::FromSeconds(1);
CHECK_LE(0, rtp_timestamp);
uint32 ntp_seconds;
uint32 ntp_fraction;
ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
TestRtcpPacketBuilder rtcp_packet;
- rtcp_packet.AddSrWithNtp(config_.incoming_ssrc,
+ rtcp_packet.AddSrWithNtp(config_.sender_ssrc,
ntp_seconds, ntp_fraction,
static_cast<uint32>(rtp_timestamp));
ASSERT_TRUE(receiver_->ProcessPacket(rtcp_packet.GetPacket().Pass()));
@@ -136,7 +137,7 @@ class FrameReceiverTest : public ::testing::Test {
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
base::TimeTicks start_time_;
- MockPacedPacketSender mock_transport_;
+ MockCastTransportSender mock_transport_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
FakeFrameClient frame_client_;
@@ -145,6 +146,7 @@ class FrameReceiverTest : public ::testing::Test {
// must remain alive until after its destruction.
scoped_ptr<FrameReceiver> receiver_;
+ private:
DISALLOW_COPY_AND_ASSIGN(FrameReceiverTest);
};
@@ -171,8 +173,8 @@ TEST_F(FrameReceiverTest, ReceivesOneFrame) {
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
- .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendRtcpFromRtpReceiver(_, _, _, _, _, _, _))
+ .WillRepeatedly(testing::Return());
FeedLipSyncInfoIntoReceiver();
task_runner_->RunTasks();
@@ -212,13 +214,13 @@ TEST_F(FrameReceiverTest, ReceivesFramesSkippingWhenAppropriate) {
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
- .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendRtcpFromRtpReceiver(_, _, _, _, _, _, _))
+ .WillRepeatedly(testing::Return());
const uint32 rtp_advance_per_frame =
- config_.frequency / config_.max_frame_rate;
+ config_.rtp_timebase / config_.target_frame_rate;
const base::TimeDelta time_advance_per_frame =
- base::TimeDelta::FromSeconds(1) / config_.max_frame_rate;
+ base::TimeDelta::FromSeconds(1) / config_.target_frame_rate;
// Feed and process lip sync in receiver.
FeedLipSyncInfoIntoReceiver();
@@ -315,13 +317,13 @@ TEST_F(FrameReceiverTest, ReceivesFramesRefusingToSkipAny) {
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
- .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendRtcpFromRtpReceiver(_, _, _, _, _, _, _))
+ .WillRepeatedly(testing::Return());
const uint32 rtp_advance_per_frame =
- config_.frequency / config_.max_frame_rate;
+ config_.rtp_timebase / config_.target_frame_rate;
const base::TimeDelta time_advance_per_frame =
- base::TimeDelta::FromSeconds(1) / config_.max_frame_rate;
+ base::TimeDelta::FromSeconds(1) / config_.target_frame_rate;
// Feed and process lip sync in receiver.
FeedLipSyncInfoIntoReceiver();
diff --git a/chromium/media/cast/receiver/video_decoder.cc b/chromium/media/cast/receiver/video_decoder.cc
index e536c3e23b5..b2a2e3e2b61 100644
--- a/chromium/media/cast/receiver/video_decoder.cc
+++ b/chromium/media/cast/receiver/video_decoder.cc
@@ -18,7 +18,7 @@
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
namespace cast {
@@ -33,19 +33,19 @@ class VideoDecoder::ImplBase
Codec codec)
: cast_environment_(cast_environment),
codec_(codec),
- cast_initialization_status_(STATUS_VIDEO_UNINITIALIZED),
+ operational_status_(STATUS_UNINITIALIZED),
seen_first_frame_(false) {}
- CastInitializationStatus InitializationResult() const {
- return cast_initialization_status_;
+ OperationalStatus InitializationResult() const {
+ return operational_status_;
}
void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
- DCHECK_EQ(cast_initialization_status_, STATUS_VIDEO_INITIALIZED);
+ DCHECK_EQ(operational_status_, STATUS_INITIALIZED);
- COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
- size_of_frame_id_types_do_not_match);
+ static_assert(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
+ "size of frame_id types do not match");
bool is_continuous = true;
if (seen_first_frame_) {
const uint32 frames_ahead = encoded_frame->frame_id - last_frame_id_;
@@ -79,8 +79,8 @@ class VideoDecoder::ImplBase
const scoped_refptr<CastEnvironment> cast_environment_;
const Codec codec_;
- // Subclass' ctor is expected to set this to STATUS_VIDEO_INITIALIZED.
- CastInitializationStatus cast_initialization_status_;
+ // Subclass' ctor is expected to set this to STATUS_INITIALIZED.
+ OperationalStatus operational_status_;
private:
bool seen_first_frame_;
@@ -93,7 +93,7 @@ class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
public:
explicit Vp8Impl(const scoped_refptr<CastEnvironment>& cast_environment)
: ImplBase(cast_environment, CODEC_VIDEO_VP8) {
- if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
return;
vpx_codec_dec_cfg_t cfg = {0};
@@ -106,20 +106,19 @@ class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
vpx_codec_vp8_dx(),
&cfg,
VPX_CODEC_USE_POSTPROC) != VPX_CODEC_OK) {
- ImplBase::cast_initialization_status_ =
- STATUS_INVALID_VIDEO_CONFIGURATION;
+ ImplBase::operational_status_ = STATUS_INVALID_CONFIGURATION;
return;
}
- ImplBase::cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
}
private:
- ~Vp8Impl() override {
- if (ImplBase::cast_initialization_status_ == STATUS_VIDEO_INITIALIZED)
+ ~Vp8Impl() final {
+ if (ImplBase::operational_status_ == STATUS_INITIALIZED)
CHECK_EQ(VPX_CODEC_OK, vpx_codec_destroy(&context_));
}
- scoped_refptr<VideoFrame> Decode(uint8* data, int len) override {
+ scoped_refptr<VideoFrame> Decode(uint8* data, int len) final {
if (len <= 0 || vpx_codec_decode(&context_,
data,
static_cast<unsigned int>(len),
@@ -175,15 +174,15 @@ class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
explicit FakeImpl(const scoped_refptr<CastEnvironment>& cast_environment)
: ImplBase(cast_environment, CODEC_VIDEO_FAKE),
last_decoded_id_(-1) {
- if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
return;
- ImplBase::cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
}
private:
- ~FakeImpl() override {}
+ ~FakeImpl() final {}
- scoped_refptr<VideoFrame> Decode(uint8* data, int len) override {
+ scoped_refptr<VideoFrame> Decode(uint8* data, int len) final {
// Make sure this is a JSON string.
if (!len || data[0] != '{')
return NULL;
@@ -237,10 +236,10 @@ VideoDecoder::VideoDecoder(
VideoDecoder::~VideoDecoder() {}
-CastInitializationStatus VideoDecoder::InitializationResult() const {
+OperationalStatus VideoDecoder::InitializationResult() const {
if (impl_.get())
return impl_->InitializationResult();
- return STATUS_UNSUPPORTED_VIDEO_CODEC;
+ return STATUS_UNSUPPORTED_CODEC;
}
void VideoDecoder::DecodeFrame(
@@ -248,8 +247,7 @@ void VideoDecoder::DecodeFrame(
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
- if (!impl_.get() ||
- impl_->InitializationResult() != STATUS_VIDEO_INITIALIZED) {
+ if (!impl_.get() || impl_->InitializationResult() != STATUS_INITIALIZED) {
callback.Run(make_scoped_refptr<VideoFrame>(NULL), false);
return;
}
diff --git a/chromium/media/cast/receiver/video_decoder.h b/chromium/media/cast/receiver/video_decoder.h
index f3d8ca202ea..2b6c859c679 100644
--- a/chromium/media/cast/receiver/video_decoder.h
+++ b/chromium/media/cast/receiver/video_decoder.h
@@ -31,10 +31,10 @@ class VideoDecoder {
Codec codec);
virtual ~VideoDecoder();
- // Returns STATUS_VIDEO_INITIALIZED if the decoder was successfully
- // constructed from the given FrameReceiverConfig. If this method returns any
- // other value, calls to DecodeFrame() will not succeed.
- CastInitializationStatus InitializationResult() const;
+ // Returns STATUS_INITIALIZED if the decoder was successfully constructed from
+ // the given FrameReceiverConfig. If this method returns any other value,
+ // calls to DecodeFrame() will not succeed.
+ OperationalStatus InitializationResult() const;
// Decode the payload in |encoded_frame| asynchronously. |callback| will be
// invoked on the CastEnvironment::MAIN thread with the result.
diff --git a/chromium/media/cast/receiver/video_decoder_unittest.cc b/chromium/media/cast/receiver/video_decoder_unittest.cc
index f777cb38b26..085065ef95a 100644
--- a/chromium/media/cast/receiver/video_decoder_unittest.cc
+++ b/chromium/media/cast/receiver/video_decoder_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <cstdlib>
+#include <vector>
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -22,14 +23,12 @@ namespace cast {
namespace {
-const int kWidth = 360;
-const int kHeight = 240;
+const int kStartingWidth = 360;
+const int kStartingHeight = 240;
const int kFrameRate = 10;
VideoSenderConfig GetVideoSenderConfigForTest() {
VideoSenderConfig config = GetDefaultVideoSenderConfig();
- config.width = kWidth;
- config.height = kHeight;
config.max_frame_rate = kFrameRate;
return config;
}
@@ -51,10 +50,11 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
}
protected:
- void SetUp() override {
+ void SetUp() final {
video_decoder_.reset(new VideoDecoder(cast_environment_, GetParam()));
- CHECK_EQ(STATUS_VIDEO_INITIALIZED, video_decoder_->InitializationResult());
+ CHECK_EQ(STATUS_INITIALIZED, video_decoder_->InitializationResult());
+ next_frame_size_ = gfx::Size(kStartingWidth, kStartingHeight);
next_frame_timestamp_ = base::TimeDelta();
last_frame_id_ = 0;
seen_a_decoded_frame_ = false;
@@ -63,34 +63,39 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
total_video_frames_decoded_ = 0;
}
+ void SetNextFrameSize(const gfx::Size& size) {
+ next_frame_size_ = size;
+ }
+
// Called from the unit test thread to create another EncodedFrame and push it
// into the decoding pipeline.
void FeedMoreVideo(int num_dropped_frames) {
// Prepare a simulated EncodedFrame to feed into the VideoDecoder.
- const gfx::Size frame_size(kWidth, kHeight);
const scoped_refptr<VideoFrame> video_frame =
VideoFrame::CreateFrame(VideoFrame::YV12,
- frame_size,
- gfx::Rect(frame_size),
- frame_size,
+ next_frame_size_,
+ gfx::Rect(next_frame_size_),
+ next_frame_size_,
next_frame_timestamp_);
+ const base::TimeTicks reference_time =
+ base::TimeTicks::UnixEpoch() + next_frame_timestamp_;
next_frame_timestamp_ += base::TimeDelta::FromSeconds(1) / kFrameRate;
PopulateVideoFrame(video_frame.get(), 0);
// Encode |frame| into |encoded_frame->data|.
- scoped_ptr<EncodedFrame> encoded_frame(
- new EncodedFrame());
+ scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
// Test only supports VP8, currently.
CHECK_EQ(CODEC_VIDEO_VP8, GetParam());
- vp8_encoder_.Encode(video_frame, base::TimeTicks(), encoded_frame.get());
+ vp8_encoder_.Encode(video_frame, reference_time, encoded_frame.get());
// Rewrite frame IDs for testing purposes.
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
- if (last_frame_id_ == 0)
+ if (encoded_frame->dependency == EncodedFrame::KEY)
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
else
encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
last_frame_id_ = encoded_frame->frame_id;
+ ASSERT_EQ(reference_time, encoded_frame->reference_time);
{
base::AutoLock auto_lock(lock_);
@@ -127,7 +132,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// A NULL |video_frame| indicates a decode error, which we don't expect.
- ASSERT_FALSE(!video_frame.get());
+ ASSERT_TRUE(video_frame.get());
// Did the decoder detect whether frames were dropped?
EXPECT_EQ(should_be_continuous, is_continuous);
@@ -148,6 +153,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
const scoped_refptr<StandaloneCastEnvironment> cast_environment_;
scoped_ptr<VideoDecoder> video_decoder_;
+ gfx::Size next_frame_size_;
base::TimeDelta next_frame_timestamp_;
uint32 last_frame_id_;
bool seen_a_decoded_frame_;
@@ -186,7 +192,38 @@ TEST_P(VideoDecoderTest, RecoversFromDroppedFrames) {
WaitForAllVideoToBeDecoded();
}
-INSTANTIATE_TEST_CASE_P(VideoDecoderTestScenarios,
+TEST_P(VideoDecoderTest, DecodesFramesOfVaryingSizes) {
+ std::vector<gfx::Size> frame_sizes;
+ frame_sizes.push_back(gfx::Size(1280, 720));
+ frame_sizes.push_back(gfx::Size(640, 360)); // Shrink both dimensions.
+ frame_sizes.push_back(gfx::Size(300, 200)); // Shrink both dimensions again.
+ frame_sizes.push_back(gfx::Size(200, 300)); // Same area.
+ frame_sizes.push_back(gfx::Size(600, 400)); // Grow both dimensions.
+ frame_sizes.push_back(gfx::Size(638, 400)); // Shrink only one dimension.
+ frame_sizes.push_back(gfx::Size(638, 398)); // Shrink the other dimension.
+ frame_sizes.push_back(gfx::Size(320, 180)); // Shrink both dimensions again.
+ frame_sizes.push_back(gfx::Size(322, 180)); // Grow only one dimension.
+ frame_sizes.push_back(gfx::Size(322, 182)); // Grow the other dimension.
+ frame_sizes.push_back(gfx::Size(1920, 1080)); // Grow both dimensions again.
+
+ // Encode one frame at each size.
+ for (const auto& frame_size : frame_sizes) {
+ SetNextFrameSize(frame_size);
+ FeedMoreVideo(0);
+ }
+
+ // Encode 10 frames at each size.
+ for (const auto& frame_size : frame_sizes) {
+ SetNextFrameSize(frame_size);
+ const int kNumFrames = 10;
+ for (int i = 0; i < kNumFrames; ++i)
+ FeedMoreVideo(0);
+ }
+
+ WaitForAllVideoToBeDecoded();
+}
+
+INSTANTIATE_TEST_CASE_P(,
VideoDecoderTest,
::testing::Values(CODEC_VIDEO_VP8));
diff --git a/chromium/media/cast/sender/audio_encoder.cc b/chromium/media/cast/sender/audio_encoder.cc
index 273151f2802..3ded4978d67 100644
--- a/chromium/media/cast/sender/audio_encoder.cc
+++ b/chromium/media/cast/sender/audio_encoder.cc
@@ -14,9 +14,7 @@
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "base/time/time.h"
-#include "media/base/audio_bus.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
#if !defined(OS_IOS)
#include "third_party/opus/src/include/opus.h"
@@ -56,7 +54,7 @@ class AudioEncoder::ImplBase
num_channels_(num_channels),
samples_per_frame_(samples_per_frame),
callback_(callback),
- cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ operational_status_(STATUS_UNINITIALIZED),
frame_duration_(base::TimeDelta::FromMicroseconds(
base::Time::kMicrosecondsPerSecond * samples_per_frame_ /
sampling_rate)),
@@ -69,12 +67,12 @@ class AudioEncoder::ImplBase
if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
frame_duration_ == base::TimeDelta() ||
samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
- cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
+ operational_status_ = STATUS_INVALID_CONFIGURATION;
}
}
- CastInitializationStatus InitializationResult() const {
- return cast_initialization_status_;
+ OperationalStatus InitializationResult() const {
+ return operational_status_;
}
int samples_per_frame() const {
@@ -85,7 +83,7 @@ class AudioEncoder::ImplBase
void EncodeAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
- DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
+ DCHECK_EQ(operational_status_, STATUS_INITIALIZED);
DCHECK(!recorded_time.is_null());
// Determine whether |recorded_time| is consistent with the amount of audio
@@ -169,8 +167,8 @@ class AudioEncoder::ImplBase
const int samples_per_frame_;
const FrameEncodedCallback callback_;
- // Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
- CastInitializationStatus cast_initialization_status_;
+ // Subclass' ctor is expected to set this to STATUS_INITIALIZED.
+ OperationalStatus operational_status_;
// The duration of one frame of encoded audio samples. Derived from
// |samples_per_frame_| and the sampling rate.
@@ -223,7 +221,7 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
encoder_memory_(new uint8[opus_encoder_get_size(num_channels)]),
opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
buffer_(new float[num_channels * samples_per_frame_]) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED ||
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED ||
sampling_rate % samples_per_frame_ != 0 ||
!IsValidFrameDuration(frame_duration_)) {
return;
@@ -232,11 +230,10 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
sampling_rate,
num_channels,
OPUS_APPLICATION_AUDIO) != OPUS_OK) {
- ImplBase::cast_initialization_status_ =
- STATUS_INVALID_AUDIO_CONFIGURATION;
+ ImplBase::operational_status_ = STATUS_INVALID_CONFIGURATION;
return;
}
- ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
if (bitrate <= 0) {
// Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a
@@ -250,12 +247,12 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
}
private:
- ~OpusImpl() override {}
+ ~OpusImpl() final {}
void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
- int num_samples) override {
+ int num_samples) final {
// Opus requires channel-interleaved samples in a single array.
for (int ch = 0; ch < audio_bus->channels(); ++ch) {
const float* src = audio_bus->channel(ch) + source_offset;
@@ -266,7 +263,7 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
}
}
- bool EncodeFromFilledBuffer(std::string* out) override {
+ bool EncodeFromFilledBuffer(std::string* out) final {
out->resize(kOpusMaxPayloadSize);
const opus_int32 result =
opus_encode_float(opus_encoder_,
@@ -343,19 +340,18 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
file_(nullptr),
num_access_units_(0),
can_resume_(true) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED) {
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED) {
return;
}
if (!Initialize(sampling_rate, bitrate)) {
- ImplBase::cast_initialization_status_ =
- STATUS_INVALID_AUDIO_CONFIGURATION;
+ ImplBase::operational_status_ = STATUS_INVALID_CONFIGURATION;
return;
}
- ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ ImplBase::operational_status_ = STATUS_INITIALIZED;
}
private:
- virtual ~AppleAacImpl() { Teardown(); }
+ ~AppleAacImpl() final { Teardown(); }
// Destroys the existing audio converter and file, if any.
void Teardown() {
@@ -532,7 +528,7 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
- int num_samples) override {
+ int num_samples) final {
DCHECK_EQ(audio_bus->channels(), input_buffer_->channels());
// See the comment on |input_bus_| for more on this optimization. Note that
@@ -554,7 +550,7 @@ class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
source_offset, num_samples, buffer_fill_offset, input_buffer_.get());
}
- bool EncodeFromFilledBuffer(std::string* out) override {
+ bool EncodeFromFilledBuffer(std::string* out) final {
// Reset the buffer size field to the buffer capacity.
converter_abl_.mBuffers[0].mDataByteSize = max_access_unit_size_;
@@ -707,18 +703,18 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
sampling_rate / kDefaultFramesPerSecond, /* 10 ms frames */
callback),
buffer_(new int16[num_channels * samples_per_frame_]) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
return;
- cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ operational_status_ = STATUS_INITIALIZED;
}
private:
- ~Pcm16Impl() override {}
+ ~Pcm16Impl() final {}
void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
- int num_samples) override {
+ int num_samples) final {
audio_bus->ToInterleavedPartial(
source_offset,
num_samples,
@@ -726,7 +722,7 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
buffer_.get() + buffer_fill_offset * num_channels_);
}
- bool EncodeFromFilledBuffer(std::string* out) override {
+ bool EncodeFromFilledBuffer(std::string* out) final {
// Output 16-bit PCM integers in big-endian byte order.
out->resize(num_channels_ * samples_per_frame_ * sizeof(int16));
const int16* src = buffer_.get();
@@ -787,17 +783,17 @@ AudioEncoder::AudioEncoder(
AudioEncoder::~AudioEncoder() {}
-CastInitializationStatus AudioEncoder::InitializationResult() const {
+OperationalStatus AudioEncoder::InitializationResult() const {
DCHECK(insert_thread_checker_.CalledOnValidThread());
if (impl_.get()) {
return impl_->InitializationResult();
}
- return STATUS_UNSUPPORTED_AUDIO_CODEC;
+ return STATUS_UNSUPPORTED_CODEC;
}
int AudioEncoder::GetSamplesPerFrame() const {
DCHECK(insert_thread_checker_.CalledOnValidThread());
- if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
return std::numeric_limits<int>::max();
}
@@ -806,7 +802,7 @@ int AudioEncoder::GetSamplesPerFrame() const {
base::TimeDelta AudioEncoder::GetFrameDuration() const {
DCHECK(insert_thread_checker_.CalledOnValidThread());
- if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
return base::TimeDelta();
}
@@ -817,7 +813,7 @@ void AudioEncoder::InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK(insert_thread_checker_.CalledOnValidThread());
DCHECK(audio_bus.get());
- if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
return;
}
diff --git a/chromium/media/cast/sender/audio_encoder.h b/chromium/media/cast/sender/audio_encoder.h
index 8c5bafad77c..e6922421702 100644
--- a/chromium/media/cast/sender/audio_encoder.h
+++ b/chromium/media/cast/sender/audio_encoder.h
@@ -33,7 +33,7 @@ class AudioEncoder {
const FrameEncodedCallback& frame_encoded_callback);
virtual ~AudioEncoder();
- CastInitializationStatus InitializationResult() const;
+ OperationalStatus InitializationResult() const;
int GetSamplesPerFrame() const;
base::TimeDelta GetFrameDuration() const;
diff --git a/chromium/media/cast/sender/audio_encoder_unittest.cc b/chromium/media/cast/sender/audio_encoder_unittest.cc
index 1ac66fd62cb..12b5f2c87fd 100644
--- a/chromium/media/cast/sender/audio_encoder_unittest.cc
+++ b/chromium/media/cast/sender/audio_encoder_unittest.cc
@@ -27,8 +27,7 @@ namespace {
class TestEncodedAudioFrameReceiver {
public:
- explicit TestEncodedAudioFrameReceiver(Codec codec)
- : codec_(codec), frames_received_(0), rtp_lower_bound_(0) {}
+ TestEncodedAudioFrameReceiver() : frames_received_(0), rtp_lower_bound_(0) {}
virtual ~TestEncodedAudioFrameReceiver() {}
int frames_received() const { return frames_received_; }
@@ -64,7 +63,6 @@ class TestEncodedAudioFrameReceiver {
}
private:
- const Codec codec_;
int frames_received_;
uint32 rtp_lower_bound_;
int samples_per_frame_;
@@ -102,7 +100,7 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
}
- void SetUp() override {
+ void SetUp() final {
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
@@ -151,7 +149,7 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
TestAudioBusFactory::kMiddleANoteFreq,
0.5f));
- receiver_.reset(new TestEncodedAudioFrameReceiver(codec));
+ receiver_.reset(new TestEncodedAudioFrameReceiver());
audio_encoder_.reset(new AudioEncoder(
cast_environment_,
diff --git a/chromium/media/cast/sender/audio_sender.cc b/chromium/media/cast/sender/audio_sender.cc
index 4748218c8eb..d084b3645a4 100644
--- a/chromium/media/cast/sender/audio_sender.cc
+++ b/chromium/media/cast/sender/audio_sender.cc
@@ -16,11 +16,11 @@ namespace cast {
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
+ const StatusChangeCallback& status_change_cb,
CastTransportSender* const transport_sender)
: FrameSender(cast_environment,
true,
transport_sender,
- base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
audio_config.frequency,
audio_config.ssrc,
0, // |max_frame_rate_| is set after encoder initialization.
@@ -29,8 +29,6 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
NewFixedCongestionControl(audio_config.bitrate)),
samples_in_encoder_(0),
weak_factory_(this) {
- cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
-
if (!audio_config.use_external_encoder) {
audio_encoder_.reset(
new AudioEncoder(cast_environment,
@@ -41,12 +39,18 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
base::Bind(&AudioSender::OnEncodedAudioFrame,
weak_factory_.GetWeakPtr(),
audio_config.bitrate)));
- cast_initialization_status_ = audio_encoder_->InitializationResult();
- } else {
- NOTREACHED(); // No support for external audio encoding.
- cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
}
+ // AudioEncoder provides no operational status changes during normal use.
+ // Post a task now with its initialization result status to allow the client
+ // to start sending frames.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb,
+ audio_encoder_ ? audio_encoder_->InitializationResult() :
+ STATUS_INVALID_CONFIGURATION));
+
// The number of samples per encoded audio frame depends on the codec and its
// initialization parameters. Now that we have an encoder, we can calculate
// the maximum frame rate.
@@ -55,7 +59,7 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
media::cast::CastTransportRtpConfig transport_config;
transport_config.ssrc = audio_config.ssrc;
- transport_config.feedback_ssrc = audio_config.incoming_feedback_ssrc;
+ transport_config.feedback_ssrc = audio_config.receiver_ssrc;
transport_config.rtp_payload_type = audio_config.rtp_payload_type;
transport_config.aes_key = audio_config.aes_key;
transport_config.aes_iv_mask = audio_config.aes_iv_mask;
@@ -73,11 +77,11 @@ AudioSender::~AudioSender() {}
void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (cast_initialization_status_ != STATUS_AUDIO_INITIALIZED) {
+
+ if (!audio_encoder_) {
NOTREACHED();
return;
}
- DCHECK(audio_encoder_.get()) << "Invalid internal state";
const base::TimeDelta next_frame_duration =
RtpDeltaToTimeDelta(audio_bus->frames(), rtp_timebase());
diff --git a/chromium/media/cast/sender/audio_sender.h b/chromium/media/cast/sender/audio_sender.h
index d7f8c69432d..b6333b101ce 100644
--- a/chromium/media/cast/sender/audio_sender.h
+++ b/chromium/media/cast/sender/audio_sender.h
@@ -33,27 +33,21 @@ class AudioSender : public FrameSender,
public:
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
+ const StatusChangeCallback& status_change_cb,
CastTransportSender* const transport_sender);
- ~AudioSender() override;
-
- CastInitializationStatus InitializationResult() const {
- return cast_initialization_status_;
- }
+ ~AudioSender() final;
// Note: It is not guaranteed that |audio_frame| will actually be encoded and
// sent, if AudioSender detects too many frames in flight. Therefore, clients
// should be careful about the rate at which this method is called.
- //
- // Note: It is invalid to call this method if InitializationResult() returns
- // anything but STATUS_AUDIO_INITIALIZED.
void InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time);
protected:
- int GetNumberOfFramesInEncoder() const override;
- base::TimeDelta GetInFlightMediaDuration() const override;
- void OnAck(uint32 frame_id) override;
+ int GetNumberOfFramesInEncoder() const final;
+ base::TimeDelta GetInFlightMediaDuration() const final;
+ void OnAck(uint32 frame_id) final;
private:
// Called by the |audio_encoder_| with the next EncodedFrame to send.
diff --git a/chromium/media/cast/sender/audio_sender_unittest.cc b/chromium/media/cast/sender/audio_sender_unittest.cc
index 0045923df12..b066280481c 100644
--- a/chromium/media/cast/sender/audio_sender_unittest.cc
+++ b/chromium/media/cast/sender/audio_sender_unittest.cc
@@ -21,11 +21,22 @@
namespace media {
namespace cast {
+namespace {
+
+void SaveOperationalStatus(OperationalStatus* out_status,
+ OperationalStatus in_status) {
+ DVLOG(1) << "OperationalStatus transitioning from " << *out_status << " to "
+ << in_status;
+ *out_status = in_status;
+}
+
+} // namespace
+
class TestPacketSender : public PacketSender {
public:
TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
- bool SendPacket(PacketRef packet, const base::Closure& cb) override {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final {
if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
++number_of_rtcp_packets_;
} else {
@@ -40,7 +51,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- int64 GetBytesSent() override { return 0; }
+ int64 GetBytesSent() final { return 0; }
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
@@ -77,16 +88,23 @@ class AudioSenderTest : public ::testing::Test {
transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_,
+ net::IPEndPoint(),
dummy_endpoint,
make_scoped_ptr(new base::DictionaryValue),
base::Bind(&UpdateCastTransportStatus),
BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
+ PacketReceiverCallback(),
&transport_));
+ OperationalStatus operational_status = STATUS_UNINITIALIZED;
audio_sender_.reset(new AudioSender(
- cast_environment_, audio_config_, transport_sender_.get()));
+ cast_environment_,
+ audio_config_,
+ base::Bind(&SaveOperationalStatus, &operational_status),
+ transport_sender_.get()));
task_runner_->RunTasks();
+ CHECK_EQ(STATUS_INITIALIZED, operational_status);
}
~AudioSenderTest() override {}
diff --git a/chromium/media/cast/sender/congestion_control.cc b/chromium/media/cast/sender/congestion_control.cc
index d14f9b08f37..5ede1b5886b 100644
--- a/chromium/media/cast/sender/congestion_control.cc
+++ b/chromium/media/cast/sender/congestion_control.cc
@@ -29,23 +29,23 @@ class AdaptiveCongestionControl : public CongestionControl {
uint32 min_bitrate_configured,
double max_frame_rate);
- ~AdaptiveCongestionControl() override;
+ ~AdaptiveCongestionControl() final;
- void UpdateRtt(base::TimeDelta rtt) override;
+ void UpdateRtt(base::TimeDelta rtt) final;
- void UpdateTargetPlayoutDelay(base::TimeDelta delay) override;
+ void UpdateTargetPlayoutDelay(base::TimeDelta delay) final;
// Called when an encoded frame is sent to the transport.
void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
- base::TimeTicks when) override;
+ base::TimeTicks when) final;
// Called when we receive an ACK for a frame.
- void AckFrame(uint32 frame_id, base::TimeTicks when) override;
+ void AckFrame(uint32 frame_id, base::TimeTicks when) final;
// Returns the bitrate we should use for the next frame.
uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) override;
+ base::TimeDelta playout_delay) final;
private:
struct FrameStats {
@@ -96,23 +96,23 @@ class AdaptiveCongestionControl : public CongestionControl {
class FixedCongestionControl : public CongestionControl {
public:
FixedCongestionControl(uint32 bitrate) : bitrate_(bitrate) {}
- ~FixedCongestionControl() override {}
+ ~FixedCongestionControl() final {}
- void UpdateRtt(base::TimeDelta rtt) override {}
+ void UpdateRtt(base::TimeDelta rtt) final {}
- void UpdateTargetPlayoutDelay(base::TimeDelta delay) override {}
+ void UpdateTargetPlayoutDelay(base::TimeDelta delay) final {}
// Called when an encoded frame is sent to the transport.
void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
- base::TimeTicks when) override {}
+ base::TimeTicks when) final {}
// Called when we receive an ACK for a frame.
- void AckFrame(uint32 frame_id, base::TimeTicks when) override {}
+ void AckFrame(uint32 frame_id, base::TimeTicks when) final {}
// Returns the bitrate we should use for the next frame.
uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) override {
+ base::TimeDelta playout_delay) final {
return bitrate_;
}
diff --git a/chromium/media/cast/sender/external_video_encoder.cc b/chromium/media/cast/sender/external_video_encoder.cc
index 6dec102436e..75ea4b49790 100644
--- a/chromium/media/cast/sender/external_video_encoder.cc
+++ b/chromium/media/cast/sender/external_video_encoder.cc
@@ -15,15 +15,9 @@
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/net/cast_transport_config.h"
-#include "media/video/video_encode_accelerator.h"
-
-namespace media {
-namespace cast {
-class LocalVideoEncodeAcceleratorClient;
-} // namespace cast
-} // namespace media
namespace {
+
static const size_t kOutputBufferCount = 3;
void LogFrameEncodedEvent(
@@ -35,6 +29,7 @@ void LogFrameEncodedEvent(
event_time, media::cast::FRAME_ENCODED, media::cast::VIDEO_EVENT,
rtp_timestamp, frame_id);
}
+
} // namespace
namespace media {
@@ -54,102 +49,63 @@ struct InProgressFrameEncode {
frame_encoded_callback(callback) {}
};
-// The ExternalVideoEncoder class can be deleted directly by cast, while
-// LocalVideoEncodeAcceleratorClient stays around long enough to properly shut
-// down the VideoEncodeAccelerator.
-class LocalVideoEncodeAcceleratorClient
+// Owns a VideoEncoderAccelerator instance and provides the necessary adapters
+// to encode media::VideoFrames and emit media::cast::EncodedFrames. All
+// methods must be called on the thread associated with the given
+// SingleThreadTaskRunner, except for the task_runner() accessor.
+class ExternalVideoEncoder::VEAClientImpl
: public VideoEncodeAccelerator::Client,
- public base::RefCountedThreadSafe<LocalVideoEncodeAcceleratorClient> {
+ public base::RefCountedThreadSafe<VEAClientImpl> {
public:
- // Create an instance of this class and post a task to create
- // video_encode_accelerator_. A ref to |this| will be kept, awaiting reply
- // via ProxyCreateVideoEncodeAccelerator, which will provide us with the
- // encoder task runner and vea instance. We cannot be destroyed until we
- // receive the reply, otherwise the VEA object created may leak.
- static scoped_refptr<LocalVideoEncodeAcceleratorClient> Create(
- scoped_refptr<CastEnvironment> cast_environment,
- const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
- const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
- const base::WeakPtr<ExternalVideoEncoder>& weak_owner) {
- scoped_refptr<LocalVideoEncodeAcceleratorClient> client(
- new LocalVideoEncodeAcceleratorClient(
- cast_environment, create_video_encode_mem_cb, weak_owner));
-
- // This will keep a ref to |client|, if weak_owner is destroyed before
- // ProxyCreateVideoEncodeAccelerator is called, we will stay alive until
- // we can properly destroy the VEA.
- create_vea_cb.Run(base::Bind(
- &LocalVideoEncodeAcceleratorClient::OnCreateVideoEncodeAcceleratorProxy,
- client));
-
- return client;
+ VEAClientImpl(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const scoped_refptr<base::SingleThreadTaskRunner>& encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea,
+ int max_frame_rate,
+ const StatusChangeCallback& status_change_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb)
+ : cast_environment_(cast_environment),
+ task_runner_(encoder_task_runner),
+ max_frame_rate_(max_frame_rate),
+ status_change_cb_(status_change_cb),
+ create_video_encode_memory_cb_(create_video_encode_memory_cb),
+ video_encode_accelerator_(vea.Pass()),
+ encoder_active_(false),
+ next_frame_id_(0u),
+ key_frame_encountered_(false) {
}
- // Initialize the real HW encoder.
- void Initialize(const VideoSenderConfig& video_config) {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
-
- VideoCodecProfile output_profile = media::VIDEO_CODEC_PROFILE_UNKNOWN;
- switch (video_config.codec) {
- case CODEC_VIDEO_VP8:
- output_profile = media::VP8PROFILE_ANY;
- break;
- case CODEC_VIDEO_H264:
- output_profile = media::H264PROFILE_MAIN;
- break;
- case CODEC_VIDEO_FAKE:
- NOTREACHED() << "Fake software video encoder cannot be external";
- break;
- default:
- NOTREACHED() << "Video codec not specified or not supported";
- break;
- }
- max_frame_rate_ = video_config.max_frame_rate;
+ base::SingleThreadTaskRunner* task_runner() const {
+ return task_runner_.get();
+ }
+
+ void Initialize(const gfx::Size& frame_size,
+ VideoCodecProfile codec_profile,
+ int start_bit_rate,
+ uint32 first_frame_id) {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
- bool result = video_encode_accelerator_->Initialize(
+ encoder_active_ = video_encode_accelerator_->Initialize(
media::VideoFrame::I420,
- gfx::Size(video_config.width, video_config.height),
- output_profile,
- video_config.start_bitrate,
+ frame_size,
+ codec_profile,
+ start_bit_rate,
this);
+ next_frame_id_ = first_frame_id;
UMA_HISTOGRAM_BOOLEAN("Cast.Sender.VideoEncodeAcceleratorInitializeSuccess",
- result);
- if (!result) {
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&ExternalVideoEncoder::EncoderInitialized, weak_owner_,
- false));
- return;
- }
-
- // Wait until shared memory is allocated to indicate that encoder is
- // initialized.
- }
-
- // Destroy the VEA on the correct thread.
- void Destroy() {
- DCHECK(encoder_task_runner_.get());
- if (!video_encode_accelerator_)
- return;
+ encoder_active_);
- if (encoder_task_runner_->RunsTasksOnCurrentThread()) {
- video_encode_accelerator_.reset();
- } else {
- // We do this instead of just reposting to encoder_task_runner_, because
- // we are called from the destructor.
- encoder_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&DestroyVideoEncodeAcceleratorOnEncoderThread,
- base::Passed(&video_encode_accelerator_)));
- }
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb_,
+ encoder_active_ ? STATUS_INITIALIZED :
+ STATUS_CODEC_INIT_FAILED));
}
- void SetBitRate(uint32 bit_rate) {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ void SetBitRate(int bit_rate) {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
video_encode_accelerator_->RequestEncodingParametersChange(bit_rate,
max_frame_rate_);
@@ -160,8 +116,10 @@ class LocalVideoEncodeAcceleratorClient
const base::TimeTicks& reference_time,
bool key_frame_requested,
const VideoEncoder::FrameEncodedCallback& frame_encoded_callback) {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
+
+ if (!encoder_active_)
+ return;
in_progress_frame_encodes_.push_back(InProgressFrameEncode(
TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency),
@@ -173,40 +131,45 @@ class LocalVideoEncodeAcceleratorClient
}
protected:
- void NotifyError(VideoEncodeAccelerator::Error error) override {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
- VLOG(1) << "ExternalVideoEncoder NotifyError: " << error;
+ void NotifyError(VideoEncodeAccelerator::Error error) final {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
+
+ DCHECK(error != VideoEncodeAccelerator::kInvalidArgumentError &&
+ error != VideoEncodeAccelerator::kIllegalStateError);
+
+ encoder_active_ = false;
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(&ExternalVideoEncoder::EncoderError, weak_owner_));
+ base::Bind(status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
+
+ // TODO(miu): Force-flush all |in_progress_frame_encodes_| immediately so
+ // pending frames do not become stuck, freezing VideoSender.
}
// Called to allocate the input and output buffers.
void RequireBitstreamBuffers(unsigned int input_count,
const gfx::Size& input_coded_size,
- size_t output_buffer_size) override {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
- DCHECK(video_encode_accelerator_);
+ size_t output_buffer_size) final {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
+ // TODO(miu): Investigate why we are ignoring |input_count| (4) and instead
+ // using |kOutputBufferCount| (3) here.
for (size_t j = 0; j < kOutputBufferCount; ++j) {
create_video_encode_memory_cb_.Run(
output_buffer_size,
- base::Bind(&LocalVideoEncodeAcceleratorClient::OnCreateSharedMemory,
- this));
+ base::Bind(&VEAClientImpl::OnCreateSharedMemory, this));
}
}
- // Encoder has encoded a frame and it's available in one of out output
- // buffers.
+ // Encoder has encoded a frame and it's available in one of the output
+ // buffers. Package the result in a media::cast::EncodedFrame and post it
+ // to the Cast MAIN thread via the supplied callback.
void BitstreamBufferReady(int32 bitstream_buffer_id,
size_t payload_size,
- bool key_frame) override {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ bool key_frame) final {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
if (bitstream_buffer_id < 0 ||
bitstream_buffer_id >= static_cast<int32>(output_buffers_.size())) {
NOTREACHED();
@@ -229,6 +192,9 @@ class LocalVideoEncodeAcceleratorClient
// Do not send video until we have encountered the first key frame.
// Save the bitstream buffer in |stream_header_| to be sent later along
// with the first key frame.
+ //
+ // TODO(miu): Should |stream_header_| be an std::ostringstream for
+ // performance reasons?
stream_header_.append(static_cast<const char*>(output_buffer->memory()),
payload_size);
} else if (!in_progress_frame_encodes_.empty()) {
@@ -237,7 +203,7 @@ class LocalVideoEncodeAcceleratorClient
scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
encoded_frame->dependency = key_frame ? EncodedFrame::KEY :
EncodedFrame::DEPENDENT;
- encoded_frame->frame_id = ++last_encoded_frame_id_;
+ encoded_frame->frame_id = next_frame_id_++;
if (key_frame)
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
else
@@ -280,60 +246,29 @@ class LocalVideoEncodeAcceleratorClient
}
private:
- LocalVideoEncodeAcceleratorClient(
- scoped_refptr<CastEnvironment> cast_environment,
- const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
- const base::WeakPtr<ExternalVideoEncoder>& weak_owner)
- : cast_environment_(cast_environment),
- create_video_encode_memory_cb_(create_video_encode_mem_cb),
- weak_owner_(weak_owner),
- last_encoded_frame_id_(kStartFrameId),
- key_frame_encountered_(false) {}
-
- // Trampoline VEA creation callback to OnCreateVideoEncodeAccelerator()
- // on encoder_task_runner. Normally we would just repost the same method to
- // it, and would not need a separate proxy method, but we can't
- // ThreadTaskRunnerHandle::Get() in unittests just yet.
- void OnCreateVideoEncodeAcceleratorProxy(
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
- scoped_ptr<media::VideoEncodeAccelerator> vea) {
- encoder_task_runner->PostTask(
- FROM_HERE,
- base::Bind(&media::cast::LocalVideoEncodeAcceleratorClient::
- OnCreateVideoEncodeAccelerator,
- this,
- encoder_task_runner,
- base::Passed(&vea)));
- }
+ friend class base::RefCountedThreadSafe<VEAClientImpl>;
- void OnCreateVideoEncodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
- scoped_ptr<media::VideoEncodeAccelerator> vea) {
- encoder_task_runner_ = encoder_task_runner;
- video_encode_accelerator_.reset(vea.release());
-
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
+ ~VEAClientImpl() final {
+ // According to the media::VideoEncodeAccelerator interface, Destroy()
+ // should be called instead of invoking its private destructor.
+ task_runner_->PostTask(
FROM_HERE,
- base::Bind(&ExternalVideoEncoder::OnCreateVideoEncodeAccelerator,
- weak_owner_,
- encoder_task_runner_));
+ base::Bind(&media::VideoEncodeAccelerator::Destroy,
+ base::Unretained(video_encode_accelerator_.release())));
}
// Note: This method can be called on any thread.
void OnCreateSharedMemory(scoped_ptr<base::SharedMemory> memory) {
- encoder_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&LocalVideoEncodeAcceleratorClient::ReceivedSharedMemory,
- this,
- base::Passed(&memory)));
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&VEAClientImpl::OnReceivedSharedMemory,
+ this,
+ base::Passed(&memory)));
}
- void ReceivedSharedMemory(scoped_ptr<base::SharedMemory> memory) {
- DCHECK(encoder_task_runner_.get());
- DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ void OnReceivedSharedMemory(scoped_ptr<base::SharedMemory> memory) {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
- output_buffers_.push_back(memory.release());
+ output_buffers_.push_back(memory.Pass());
// Wait until all requested buffers are received.
if (output_buffers_.size() < kOutputBufferCount)
@@ -346,33 +281,16 @@ class LocalVideoEncodeAcceleratorClient
output_buffers_[i]->handle(),
output_buffers_[i]->mapped_size()));
}
-
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&ExternalVideoEncoder::EncoderInitialized, weak_owner_,
- true));
- }
-
- static void DestroyVideoEncodeAcceleratorOnEncoderThread(
- scoped_ptr<media::VideoEncodeAccelerator> vea) {
- // VEA::~VEA specialization takes care of calling Destroy() on the VEA impl.
- }
-
- friend class base::RefCountedThreadSafe<LocalVideoEncodeAcceleratorClient>;
-
- ~LocalVideoEncodeAcceleratorClient() override {
- Destroy();
- DCHECK(!video_encode_accelerator_);
}
const scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner_;
- scoped_ptr<media::VideoEncodeAccelerator> video_encode_accelerator_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ const int max_frame_rate_;
+ const StatusChangeCallback status_change_cb_; // Must be run on MAIN thread.
const CreateVideoEncodeMemoryCallback create_video_encode_memory_cb_;
- const base::WeakPtr<ExternalVideoEncoder> weak_owner_;
- int max_frame_rate_;
- uint32 last_encoded_frame_id_;
+ scoped_ptr<media::VideoEncodeAccelerator> video_encode_accelerator_;
+ bool encoder_active_;
+ uint32 next_frame_id_;
bool key_frame_encountered_;
std::string stream_header_;
@@ -382,109 +300,174 @@ class LocalVideoEncodeAcceleratorClient
// FIFO list.
std::list<InProgressFrameEncode> in_progress_frame_encodes_;
- DISALLOW_COPY_AND_ASSIGN(LocalVideoEncodeAcceleratorClient);
+ DISALLOW_COPY_AND_ASSIGN(VEAClientImpl);
};
+// static
+bool ExternalVideoEncoder::IsSupported(const VideoSenderConfig& video_config) {
+ if (video_config.codec != CODEC_VIDEO_VP8 &&
+ video_config.codec != CODEC_VIDEO_H264)
+ return false;
+
+ // TODO(miu): "Layering hooks" are needed to be able to query outside of
+ // libmedia, to determine whether the system provides a hardware encoder. For
+ // now, assume that this was already checked by this point.
+ // http://crbug.com/454029
+ return video_config.use_external_encoder;
+}
+
ExternalVideoEncoder::ExternalVideoEncoder(
- scoped_refptr<CastEnvironment> cast_environment,
+ const scoped_refptr<CastEnvironment>& cast_environment,
const VideoSenderConfig& video_config,
- const CastInitializationCallback& initialization_cb,
+ const gfx::Size& frame_size,
+ uint32 first_frame_id,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
- const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb)
- : video_config_(video_config),
- cast_environment_(cast_environment),
- encoder_active_(false),
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb)
+ : cast_environment_(cast_environment),
+ create_video_encode_memory_cb_(create_video_encode_memory_cb),
+ frame_size_(frame_size),
+ bit_rate_(video_config.start_bitrate),
key_frame_requested_(false),
- initialization_cb_(initialization_cb),
weak_factory_(this) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- video_accelerator_client_ =
- LocalVideoEncodeAcceleratorClient::Create(cast_environment_,
- create_vea_cb,
- create_video_encode_mem_cb,
- weak_factory_.GetWeakPtr());
- DCHECK(video_accelerator_client_.get());
+ DCHECK_GT(video_config.max_frame_rate, 0);
+ DCHECK(!frame_size_.IsEmpty());
+ DCHECK(!status_change_cb.is_null());
+ DCHECK(!create_vea_cb.is_null());
+ DCHECK(!create_video_encode_memory_cb_.is_null());
+ DCHECK_GT(bit_rate_, 0);
+
+ create_vea_cb.Run(
+ base::Bind(&ExternalVideoEncoder::OnCreateVideoEncodeAccelerator,
+ weak_factory_.GetWeakPtr(),
+ video_config,
+ first_frame_id,
+ status_change_cb));
}
ExternalVideoEncoder::~ExternalVideoEncoder() {
}
-void ExternalVideoEncoder::EncoderInitialized(bool success) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- encoder_active_ = success;
- DCHECK(!initialization_cb_.is_null());
- initialization_cb_.Run(
- success ?
- STATUS_VIDEO_INITIALIZED : STATUS_HW_VIDEO_ENCODER_NOT_SUPPORTED);
- initialization_cb_.Reset();
-}
-
-void ExternalVideoEncoder::EncoderError() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- encoder_active_ = false;
-}
-
-void ExternalVideoEncoder::OnCreateVideoEncodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- encoder_task_runner_ = encoder_task_runner;
-
- encoder_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&LocalVideoEncodeAcceleratorClient::Initialize,
- video_accelerator_client_,
- video_config_));
-}
-
bool ExternalVideoEncoder::EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!frame_encoded_callback.is_null());
- if (!encoder_active_)
+ if (!client_ || video_frame->visible_rect().size() != frame_size_)
return false;
- encoder_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&LocalVideoEncodeAcceleratorClient::EncodeVideoFrame,
- video_accelerator_client_,
- video_frame,
- reference_time,
- key_frame_requested_,
- frame_encoded_callback));
-
+ client_->task_runner()->PostTask(FROM_HERE,
+ base::Bind(&VEAClientImpl::EncodeVideoFrame,
+ client_,
+ video_frame,
+ reference_time,
+ key_frame_requested_,
+ frame_encoded_callback));
key_frame_requested_ = false;
return true;
}
-// Inform the encoder about the new target bit rate.
void ExternalVideoEncoder::SetBitRate(int new_bit_rate) {
- if (!encoder_active_) {
- // If we receive SetBitRate() before VEA creation callback is invoked,
- // cache the new bit rate in the encoder config and use the new settings
- // to initialize VEA.
- video_config_.start_bitrate = new_bit_rate;
- return;
- }
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK_GT(new_bit_rate, 0);
- encoder_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&LocalVideoEncodeAcceleratorClient::SetBitRate,
- video_accelerator_client_,
- new_bit_rate));
+ bit_rate_ = new_bit_rate;
+ if (!client_)
+ return;
+ client_->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&VEAClientImpl::SetBitRate, client_, bit_rate_));
}
-// Inform the encoder to encode the next frame as a key frame.
void ExternalVideoEncoder::GenerateKeyFrame() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
key_frame_requested_ = true;
}
-// Inform the encoder to only reference frames older or equal to frame_id;
void ExternalVideoEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
- // Do nothing not supported.
+ // Do nothing. Not supported.
}
+
+void ExternalVideoEncoder::OnCreateVideoEncodeAccelerator(
+ const VideoSenderConfig& video_config,
+ uint32 first_frame_id,
+ const StatusChangeCallback& status_change_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // The callback will be invoked with null pointers in the case where the
+ // system does not support or lacks the resources to provide GPU-accelerated
+ // video encoding.
+ if (!encoder_task_runner || !vea) {
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb, STATUS_CODEC_INIT_FAILED));
+ return;
+ }
+
+ VideoCodecProfile codec_profile;
+ switch (video_config.codec) {
+ case CODEC_VIDEO_VP8:
+ codec_profile = media::VP8PROFILE_ANY;
+ break;
+ case CODEC_VIDEO_H264:
+ codec_profile = media::H264PROFILE_MAIN;
+ break;
+ case CODEC_VIDEO_FAKE:
+ NOTREACHED() << "Fake software video encoder cannot be external";
+ // ...flow through to next case...
+ default:
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb, STATUS_UNSUPPORTED_CODEC));
+ return;
+ }
+
+ DCHECK(!client_);
+ client_ = new VEAClientImpl(cast_environment_,
+ encoder_task_runner,
+ vea.Pass(),
+ video_config.max_frame_rate,
+ status_change_cb,
+ create_video_encode_memory_cb_);
+ client_->task_runner()->PostTask(FROM_HERE,
+ base::Bind(&VEAClientImpl::Initialize,
+ client_,
+ frame_size_,
+ codec_profile,
+ bit_rate_,
+ first_frame_id));
+}
+
+SizeAdaptableExternalVideoEncoder::SizeAdaptableExternalVideoEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb)
+ : SizeAdaptableVideoEncoderBase(cast_environment,
+ video_config,
+ status_change_cb),
+ create_vea_cb_(create_vea_cb),
+ create_video_encode_memory_cb_(create_video_encode_memory_cb) {}
+
+SizeAdaptableExternalVideoEncoder::~SizeAdaptableExternalVideoEncoder() {}
+
+scoped_ptr<VideoEncoder> SizeAdaptableExternalVideoEncoder::CreateEncoder() {
+ return scoped_ptr<VideoEncoder>(new ExternalVideoEncoder(
+ cast_environment(),
+ video_config(),
+ frame_size(),
+ last_frame_id() + 1,
+ CreateEncoderStatusChangeCallback(),
+ create_vea_cb_,
+ create_video_encode_memory_cb_));
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder.h b/chromium/media/cast/sender/external_video_encoder.h
index 90125557c05..fcd616c7f22 100644
--- a/chromium/media/cast/sender/external_video_encoder.h
+++ b/chromium/media/cast/sender/external_video_encoder.h
@@ -9,73 +9,99 @@
#include "base/memory/weak_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/sender/size_adaptable_video_encoder_base.h"
#include "media/cast/sender/video_encoder.h"
#include "media/video/video_encode_accelerator.h"
-
-namespace media {
-class VideoFrame;
-}
+#include "ui/gfx/geometry/size.h"
namespace media {
namespace cast {
-class LocalVideoEncodeAcceleratorClient;
-
-// This object is called external from the main cast thread and internally from
-// the video encoder thread.
+// Cast MAIN thread proxy to the internal media::VideoEncodeAccelerator
+// implementation running on a separate thread. Encodes media::VideoFrames and
+// emits media::cast::EncodedFrames.
class ExternalVideoEncoder : public VideoEncoder {
public:
+ // Returns true if the current platform and system configuration supports
+ // using ExternalVideoEncoder with the given |video_config|.
+ static bool IsSupported(const VideoSenderConfig& video_config);
+
ExternalVideoEncoder(
- scoped_refptr<CastEnvironment> cast_environment,
+ const scoped_refptr<CastEnvironment>& cast_environment,
const VideoSenderConfig& video_config,
- const CastInitializationCallback& initialization_cb,
+ const gfx::Size& frame_size,
+ uint32 first_frame_id,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
- const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb);
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb);
- ~ExternalVideoEncoder() override;
+ ~ExternalVideoEncoder() final;
// VideoEncoder implementation.
bool EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time,
- const FrameEncodedCallback& frame_encoded_callback) override;
- void SetBitRate(int new_bit_rate) override;
- void GenerateKeyFrame() override;
- void LatestFrameIdToReference(uint32 frame_id) override;
+ const FrameEncodedCallback& frame_encoded_callback) final;
+ void SetBitRate(int new_bit_rate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
- // Called when video_accelerator_client_ has finished creating the VEA and
- // is ready for use.
- void OnCreateVideoEncodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner);
+ private:
+ class VEAClientImpl;
- protected:
- // If |success| is true then encoder is initialized successfully.
- // Otherwise encoder initialization failed.
- void EncoderInitialized(bool success);
- void EncoderError();
+ // Method invoked by the CreateVideoEncodeAcceleratorCallback to construct a
+ // VEAClientImpl to own and interface with a new |vea|. Upon return,
+ // |client_| holds a reference to the new VEAClientImpl.
+ void OnCreateVideoEncodeAccelerator(
+ const VideoSenderConfig& video_config,
+ uint32 first_frame_id,
+ const StatusChangeCallback& status_change_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea);
- private:
- friend class LocalVideoEncodeAcceleratorClient;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const CreateVideoEncodeMemoryCallback create_video_encode_memory_cb_;
- VideoSenderConfig video_config_;
- scoped_refptr<CastEnvironment> cast_environment_;
+ // The size of the visible region of the video frames to be encoded.
+ const gfx::Size frame_size_;
- bool encoder_active_;
+ int bit_rate_;
bool key_frame_requested_;
- scoped_refptr<LocalVideoEncodeAcceleratorClient> video_accelerator_client_;
- scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner_;
-
- CastInitializationCallback initialization_cb_;
+ scoped_refptr<VEAClientImpl> client_;
- // Weak pointer factory for posting back LocalVideoEncodeAcceleratorClient
- // notifications to ExternalVideoEncoder.
+ // Provides a weak pointer for the OnCreateVideoEncoderAccelerator() callback.
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<ExternalVideoEncoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(ExternalVideoEncoder);
};
+// An implementation of SizeAdaptableVideoEncoderBase to proxy for
+// ExternalVideoEncoder instances.
+class SizeAdaptableExternalVideoEncoder : public SizeAdaptableVideoEncoderBase {
+ public:
+ SizeAdaptableExternalVideoEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb);
+
+ ~SizeAdaptableExternalVideoEncoder() final;
+
+ protected:
+ scoped_ptr<VideoEncoder> CreateEncoder() final;
+
+ private:
+ // Special callbacks needed by media::cast::ExternalVideoEncoder.
+ // TODO(miu): Remove these. http://crbug.com/454029
+ const CreateVideoEncodeAcceleratorCallback create_vea_cb_;
+ const CreateVideoEncodeMemoryCallback create_video_encode_memory_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(SizeAdaptableExternalVideoEncoder);
+};
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder_unittest.cc b/chromium/media/cast/sender/external_video_encoder_unittest.cc
deleted file mode 100644
index db553681d54..00000000000
--- a/chromium/media/cast/sender/external_video_encoder_unittest.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/sender/external_video_encoder.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/test/fake_video_encode_accelerator.h"
-#include "media/cast/test/utility/video_utility.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-namespace {
-
-void IgnoreInitializationStatus(CastInitializationStatus status) {}
-
-class VEAFactory {
- public:
- VEAFactory(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- scoped_ptr<VideoEncodeAccelerator> vea)
- : task_runner_(task_runner), vea_(vea.Pass()) {}
-
- void CreateVideoEncodeAccelerator(
- const ReceiveVideoEncodeAcceleratorCallback& callback) {
- create_cb_ = callback;
- }
-
- void FinishCreatingVideoEncodeAccelerator() {
- create_cb_.Run(task_runner_, vea_.Pass());
- }
-
- private:
- const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_ptr<VideoEncodeAccelerator> vea_;
- ReceiveVideoEncodeAcceleratorCallback create_cb_;
-};
-
-void CreateSharedMemory(
- size_t size, const ReceiveVideoEncodeMemoryCallback& callback) {
- scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
- if (!shm->CreateAndMapAnonymous(size)) {
- NOTREACHED();
- return;
- }
- callback.Run(shm.Pass());
-}
-
-class TestVideoEncoderCallback
- : public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
- public:
- TestVideoEncoderCallback() {}
-
- void SetExpectedResult(uint32 expected_frame_id,
- uint32 expected_last_referenced_frame_id,
- uint32 expected_rtp_timestamp,
- const base::TimeTicks& expected_reference_time) {
- expected_frame_id_ = expected_frame_id;
- expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
- expected_rtp_timestamp_ = expected_rtp_timestamp;
- expected_reference_time_ = expected_reference_time;
- }
-
- void DeliverEncodedVideoFrame(
- scoped_ptr<EncodedFrame> encoded_frame) {
- if (expected_frame_id_ == expected_last_referenced_frame_id_) {
- EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
- } else {
- EXPECT_EQ(EncodedFrame::DEPENDENT,
- encoded_frame->dependency);
- }
- EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
- EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->referenced_frame_id);
- EXPECT_EQ(expected_rtp_timestamp_, encoded_frame->rtp_timestamp);
- EXPECT_EQ(expected_reference_time_, encoded_frame->reference_time);
- }
-
- protected:
- virtual ~TestVideoEncoderCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
-
- bool expected_key_frame_;
- uint32 expected_frame_id_;
- uint32 expected_last_referenced_frame_id_;
- uint32 expected_rtp_timestamp_;
- base::TimeTicks expected_reference_time_;
-
- DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
-};
-} // namespace
-
-class ExternalVideoEncoderTest : public ::testing::Test {
- protected:
- ExternalVideoEncoderTest()
- : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
- video_config_.ssrc = 1;
- video_config_.incoming_feedback_ssrc = 2;
- video_config_.rtp_payload_type = 127;
- video_config_.use_external_encoder = true;
- video_config_.width = 320;
- video_config_.height = 240;
- video_config_.max_bitrate = 5000000;
- video_config_.min_bitrate = 1000000;
- video_config_.start_bitrate = 2000000;
- video_config_.max_qp = 56;
- video_config_.min_qp = 0;
- video_config_.max_frame_rate = 30;
- video_config_.max_number_of_video_buffers_used = 3;
- video_config_.codec = CODEC_VIDEO_VP8;
- gfx::Size size(video_config_.width, video_config_.height);
- video_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- PopulateVideoFrame(video_frame_.get(), 123);
-
- testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
- task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
- cast_environment_ =
- new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_);
-
- fake_vea_ = new test::FakeVideoEncodeAccelerator(task_runner_,
- &stored_bitrates_);
- scoped_ptr<VideoEncodeAccelerator> fake_vea(fake_vea_);
- VEAFactory vea_factory(task_runner_, fake_vea.Pass());
- video_encoder_.reset(new ExternalVideoEncoder(
- cast_environment_,
- video_config_,
- base::Bind(&IgnoreInitializationStatus),
- base::Bind(&VEAFactory::CreateVideoEncodeAccelerator,
- base::Unretained(&vea_factory)),
- base::Bind(&CreateSharedMemory)));
- vea_factory.FinishCreatingVideoEncodeAccelerator();
- }
-
- ~ExternalVideoEncoderTest() override {}
-
- void AdvanceClockAndVideoFrameTimestamp() {
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
- video_frame_->set_timestamp(
- video_frame_->timestamp() + base::TimeDelta::FromMilliseconds(33));
- }
-
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
- test::FakeVideoEncodeAccelerator* fake_vea_; // Owned by video_encoder_.
- std::vector<uint32> stored_bitrates_;
- scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
- VideoSenderConfig video_config_;
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_ptr<VideoEncoder> video_encoder_;
- scoped_refptr<media::VideoFrame> video_frame_;
- scoped_refptr<CastEnvironment> cast_environment_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalVideoEncoderTest);
-};
-
-TEST_F(ExternalVideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
- task_runner_->RunTasks(); // Run the initializer on the correct thread.
-
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- test_video_encoder_callback_->SetExpectedResult(
- 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- video_encoder_->SetBitRate(2000);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- for (int i = 0; i < 6; ++i) {
- AdvanceClockAndVideoFrameTimestamp();
- test_video_encoder_callback_->SetExpectedResult(
- i + 1,
- i,
- TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
- }
- // We need to run the task to cleanup the GPU instance.
- video_encoder_.reset(NULL);
- task_runner_->RunTasks();
-
- ASSERT_EQ(1u, stored_bitrates_.size());
- EXPECT_EQ(2000u, stored_bitrates_[0]);
-}
-
-TEST_F(ExternalVideoEncoderTest, StreamHeader) {
- task_runner_->RunTasks(); // Run the initializer on the correct thread.
-
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- // Force the FakeVideoEncodeAccelerator to return a dummy non-key frame first.
- fake_vea_->SendDummyFrameForTesting(false);
-
- // Verify the first returned bitstream buffer is still a key frame.
- test_video_encoder_callback_->SetExpectedResult(
- 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- // We need to run the task to cleanup the GPU instance.
- video_encoder_.reset(NULL);
- task_runner_->RunTasks();
-}
-
-// Verify that everything goes well even if ExternalVideoEncoder is destroyed
-// before it has a chance to receive the VEA creation callback.
-TEST(ExternalVideoEncoderEarlyDestroyTest, DestroyBeforeVEACreatedCallback) {
- VideoSenderConfig video_config;
- base::SimpleTestTickClock* testing_clock = new base::SimpleTestTickClock();
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner(
- new test::FakeSingleThreadTaskRunner(testing_clock));
- scoped_refptr<CastEnvironment> cast_environment(
- new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock).Pass(),
- task_runner,
- task_runner,
- task_runner));
-
- std::vector<uint32> stored_bitrates;
- scoped_ptr<VideoEncodeAccelerator> fake_vea(
- new test::FakeVideoEncodeAccelerator(task_runner, &stored_bitrates));
- VEAFactory vea_factory(task_runner, fake_vea.Pass());
-
- scoped_ptr<ExternalVideoEncoder> video_encoder(new ExternalVideoEncoder(
- cast_environment,
- video_config,
- base::Bind(&IgnoreInitializationStatus),
- base::Bind(&VEAFactory::CreateVideoEncodeAccelerator,
- base::Unretained(&vea_factory)),
- base::Bind(&CreateSharedMemory)));
-
- video_encoder.reset();
- vea_factory.FinishCreatingVideoEncodeAccelerator();
- task_runner->RunTasks();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.cc b/chromium/media/cast/sender/fake_software_video_encoder.cc
index 12b6b775efc..7a2a333153e 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.cc
+++ b/chromium/media/cast/sender/fake_software_video_encoder.cc
@@ -33,6 +33,11 @@ void FakeSoftwareVideoEncoder::Encode(
EncodedFrame* encoded_frame) {
DCHECK(encoded_frame);
+ if (video_frame->visible_rect().size() != last_frame_size_) {
+ next_frame_is_key_ = true;
+ last_frame_size_ = video_frame->visible_rect().size();
+ }
+
encoded_frame->frame_id = frame_id_++;
if (next_frame_is_key_) {
encoded_frame->dependency = EncodedFrame::KEY;
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.h b/chromium/media/cast/sender/fake_software_video_encoder.h
index cf5769c857a..a0e8af666a3 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.h
+++ b/chromium/media/cast/sender/fake_software_video_encoder.h
@@ -7,6 +7,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/sender/software_video_encoder.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
namespace cast {
@@ -14,19 +15,20 @@ namespace cast {
class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
public:
FakeSoftwareVideoEncoder(const VideoSenderConfig& video_config);
- ~FakeSoftwareVideoEncoder() override;
+ ~FakeSoftwareVideoEncoder() final;
// SoftwareVideoEncoder implementations.
- void Initialize() override;
+ void Initialize() final;
void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time,
- EncodedFrame* encoded_frame) override;
- void UpdateRates(uint32 new_bitrate) override;
- void GenerateKeyFrame() override;
- void LatestFrameIdToReference(uint32 frame_id) override;
+ EncodedFrame* encoded_frame) final;
+ void UpdateRates(uint32 new_bitrate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
private:
VideoSenderConfig video_config_;
+ gfx::Size last_frame_size_;
bool next_frame_is_key_;
uint32 frame_id_;
uint32 frame_id_to_reference_;
diff --git a/chromium/media/cast/sender/fake_video_encode_accelerator_factory.cc b/chromium/media/cast/sender/fake_video_encode_accelerator_factory.cc
new file mode 100644
index 00000000000..24eba4b07ee
--- /dev/null
+++ b/chromium/media/cast/sender/fake_video_encode_accelerator_factory.cc
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/fake_video_encode_accelerator_factory.h"
+
+#include "base/callback_helpers.h"
+
+namespace media {
+namespace cast {
+
+FakeVideoEncodeAcceleratorFactory::FakeVideoEncodeAcceleratorFactory(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner),
+ will_init_succeed_(true),
+ auto_respond_(false),
+ vea_response_count_(0),
+ shm_response_count_(0),
+ last_response_vea_(nullptr),
+ last_response_shm_(nullptr) {}
+
+FakeVideoEncodeAcceleratorFactory::~FakeVideoEncodeAcceleratorFactory() {}
+
+void FakeVideoEncodeAcceleratorFactory::SetInitializationWillSucceed(
+ bool will_init_succeed) {
+ will_init_succeed_ = will_init_succeed;
+}
+
+void FakeVideoEncodeAcceleratorFactory::SetAutoRespond(bool auto_respond) {
+ auto_respond_ = auto_respond;
+ if (auto_respond_) {
+ if (!vea_response_callback_.is_null())
+ RespondWithVideoEncodeAccelerator();
+ if (!shm_response_callback_.is_null())
+ RespondWithSharedMemory();
+ }
+}
+
+void FakeVideoEncodeAcceleratorFactory::CreateVideoEncodeAccelerator(
+ const ReceiveVideoEncodeAcceleratorCallback& callback) {
+ DCHECK(!callback.is_null());
+ DCHECK(!next_response_vea_);
+
+ FakeVideoEncodeAccelerator* const vea =
+ new FakeVideoEncodeAccelerator(task_runner_);
+ vea->SetWillInitializationSucceed(will_init_succeed_);
+ next_response_vea_.reset(vea);
+ vea_response_callback_ = callback;
+ if (auto_respond_)
+ RespondWithVideoEncodeAccelerator();
+}
+
+void FakeVideoEncodeAcceleratorFactory::CreateSharedMemory(
+ size_t size, const ReceiveVideoEncodeMemoryCallback& callback) {
+ DCHECK(!callback.is_null());
+ DCHECK(!next_response_shm_);
+
+ next_response_shm_.reset(new base::SharedMemory());
+ CHECK(next_response_shm_->CreateAndMapAnonymous(size));
+ shm_response_callback_ = callback;
+ if (auto_respond_)
+ RespondWithSharedMemory();
+}
+
+void FakeVideoEncodeAcceleratorFactory::RespondWithVideoEncodeAccelerator() {
+ DCHECK(next_response_vea_.get());
+ last_response_vea_ = next_response_vea_.get();
+ ++vea_response_count_;
+ base::ResetAndReturn(&vea_response_callback_).Run(
+ task_runner_, next_response_vea_.Pass());
+}
+
+void FakeVideoEncodeAcceleratorFactory::RespondWithSharedMemory() {
+ DCHECK(next_response_shm_.get());
+ last_response_shm_ = next_response_shm_.get();
+ ++shm_response_count_;
+ base::ResetAndReturn(&shm_response_callback_).Run(next_response_shm_.Pass());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/fake_video_encode_accelerator_factory.h b/chromium/media/cast/sender/fake_video_encode_accelerator_factory.h
new file mode 100644
index 00000000000..73c8feb1112
--- /dev/null
+++ b/chromium/media/cast/sender/fake_video_encode_accelerator_factory.h
@@ -0,0 +1,90 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_SENDER_FAKE_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
+#define MEDIA_CAST_SENDER_FAKE_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/single_thread_task_runner.h"
+#include "media/cast/cast_config.h"
+#include "media/video/fake_video_encode_accelerator.h"
+
+namespace media {
+namespace cast {
+
+// Used by test code to create fake VideoEncodeAccelerators. The test code
+// controls when the response callback is invoked.
+class FakeVideoEncodeAcceleratorFactory {
+ public:
+ explicit FakeVideoEncodeAcceleratorFactory(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ ~FakeVideoEncodeAcceleratorFactory();
+
+ int vea_response_count() const {
+ return vea_response_count_;
+ }
+ int shm_response_count() const {
+ return shm_response_count_;
+ }
+
+ // These return the instance last responded. It is up to the caller to
+ // determine whether the pointer is still valid, since this factory does not
+ // own these objects anymore.
+ media::FakeVideoEncodeAccelerator* last_response_vea() const {
+ return static_cast<media::FakeVideoEncodeAccelerator*>(last_response_vea_);
+ }
+ base::SharedMemory* last_response_shm() const {
+ return last_response_shm_;
+ }
+
+ // Set whether the next created media::FakeVideoEncodeAccelerator will
+ // initialize successfully.
+ void SetInitializationWillSucceed(bool will_init_succeed);
+
+ // Enable/disable auto-respond mode. Default is disabled.
+ void SetAutoRespond(bool auto_respond);
+
+ // Creates a media::FakeVideoEncodeAccelerator. If in auto-respond mode,
+ // |callback| is run synchronously (i.e., before this method returns).
+ void CreateVideoEncodeAccelerator(
+ const ReceiveVideoEncodeAcceleratorCallback& callback);
+
+ // Creates shared memory of the requested |size|. If in auto-respond mode,
+ // |callback| is run synchronously (i.e., before this method returns).
+ void CreateSharedMemory(
+ size_t size,
+ const ReceiveVideoEncodeMemoryCallback& callback);
+
+ // Runs the |callback| provided to the last call to
+ // CreateVideoEncodeAccelerator() with the new VideoEncodeAccelerator
+ // instance.
+ void RespondWithVideoEncodeAccelerator();
+
+ // Runs the |callback| provided to the last call to
+ // CreateSharedMemory() with the new base::SharedMemory instance.
+ void RespondWithSharedMemory();
+
+ private:
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ bool will_init_succeed_;
+ bool auto_respond_;
+ scoped_ptr<media::VideoEncodeAccelerator> next_response_vea_;
+ ReceiveVideoEncodeAcceleratorCallback vea_response_callback_;
+ scoped_ptr<base::SharedMemory> next_response_shm_;
+ ReceiveVideoEncodeMemoryCallback shm_response_callback_;
+ int vea_response_count_;
+ int shm_response_count_;
+ media::VideoEncodeAccelerator* last_response_vea_;
+ base::SharedMemory* last_response_shm_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeVideoEncodeAcceleratorFactory);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_SENDER_FAKE_VIDEO_ENCODE_ACCELERATOR_FACTORY_H_
diff --git a/chromium/media/cast/sender/frame_sender.cc b/chromium/media/cast/sender/frame_sender.cc
index c5319a4fae0..fc9aa36bd5d 100644
--- a/chromium/media/cast/sender/frame_sender.cc
+++ b/chromium/media/cast/sender/frame_sender.cc
@@ -4,7 +4,7 @@
#include "media/cast/sender/frame_sender.h"
-#include "base/debug/trace_event.h"
+#include "base/trace_event/trace_event.h"
namespace media {
namespace cast {
@@ -25,7 +25,6 @@ const int kMaxFrameBurst = 5;
FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
bool is_audio,
CastTransportSender* const transport_sender,
- base::TimeDelta rtcp_interval,
int rtp_timebase,
uint32 ssrc,
double max_frame_rate,
@@ -35,7 +34,6 @@ FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
: cast_environment_(cast_environment),
transport_sender_(transport_sender),
ssrc_(ssrc),
- rtcp_interval_(rtcp_interval),
min_playout_delay_(min_playout_delay == base::TimeDelta() ?
max_playout_delay : min_playout_delay),
max_playout_delay_(max_playout_delay),
@@ -62,17 +60,12 @@ FrameSender::~FrameSender() {
void FrameSender::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next = rtcp_interval_;
-
- time_to_next = std::max(
- time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN,
- FROM_HERE,
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&FrameSender::SendRtcpReport, weak_factory_.GetWeakPtr(),
true),
- time_to_next);
+ base::TimeDelta::FromMilliseconds(kDefaultRtcpIntervalMs));
}
void FrameSender::SendRtcpReport(bool schedule_future_reports) {
diff --git a/chromium/media/cast/sender/frame_sender.h b/chromium/media/cast/sender/frame_sender.h
index a3ef1e55119..7a53c343ffd 100644
--- a/chromium/media/cast/sender/frame_sender.h
+++ b/chromium/media/cast/sender/frame_sender.h
@@ -25,7 +25,6 @@ class FrameSender {
FrameSender(scoped_refptr<CastEnvironment> cast_environment,
bool is_audio,
CastTransportSender* const transport_sender,
- base::TimeDelta rtcp_interval,
int rtp_timebase,
uint32 ssrc,
double max_frame_rate,
@@ -107,8 +106,6 @@ class FrameSender {
// Returns the number of frames that were sent but not yet acknowledged.
int GetUnacknowledgedFrameCount() const;
- const base::TimeDelta rtcp_interval_;
-
// The total amount of time between a frame's capture/recording on the sender
// and its playback on the receiver (i.e., shown to a user). This is fixed as
// a value large enough to give the system sufficient time to encode,
@@ -150,10 +147,6 @@ class FrameSender {
// case, VideoSender will trigger a re-send of the next frame.
int duplicate_ack_counter_;
- // If this sender is ready for use, this is STATUS_AUDIO_INITIALIZED or
- // STATUS_VIDEO_INITIALIZED.
- CastInitializationStatus cast_initialization_status_;
-
// This object controls how we change the bitrate to make sure the
// buffer doesn't overflow.
scoped_ptr<CongestionControl> congestion_control_;
diff --git a/chromium/media/cast/sender/h264_vt_encoder.cc b/chromium/media/cast/sender/h264_vt_encoder.cc
new file mode 100644
index 00000000000..eb7bc603e35
--- /dev/null
+++ b/chromium/media/cast/sender/h264_vt_encoder.cc
@@ -0,0 +1,763 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/h264_vt_encoder.h"
+
+#include <string>
+#include <vector>
+
+#include "base/big_endian.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/synchronization/lock.h"
+#include "media/base/mac/corevideo_glue.h"
+#include "media/base/mac/video_frame_mac.h"
+#include "media/cast/sender/video_frame_factory.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+// Container for the associated data of a video frame being processed.
+struct InProgressFrameEncode {
+ const RtpTimestamp rtp_timestamp;
+ const base::TimeTicks reference_time;
+ const VideoEncoder::FrameEncodedCallback frame_encoded_callback;
+
+ InProgressFrameEncode(RtpTimestamp rtp,
+ base::TimeTicks r_time,
+ VideoEncoder::FrameEncodedCallback callback)
+ : rtp_timestamp(rtp),
+ reference_time(r_time),
+ frame_encoded_callback(callback) {}
+};
+
+base::ScopedCFTypeRef<CFDictionaryRef>
+DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) {
+ return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate(
+ kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+}
+
+base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
+ CFTypeRef value) {
+ CFTypeRef keys[1] = {key};
+ CFTypeRef values[1] = {value};
+ return DictionaryWithKeysAndValues(keys, values, 1);
+}
+
+base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
+ std::vector<CFNumberRef> numbers;
+ numbers.reserve(size);
+ for (const int* end = v + size; v < end; ++v)
+ numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v));
+ base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
+ kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]),
+ numbers.size(), &kCFTypeArrayCallBacks));
+ for (auto& number : numbers) {
+ CFRelease(number);
+ }
+ return array;
+}
+
+template <typename NalSizeType>
+void CopyNalsToAnnexB(char* avcc_buffer,
+ const size_t avcc_size,
+ std::string* annexb_buffer) {
+ static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
+ sizeof(NalSizeType) == 4,
+ "NAL size type has unsupported size");
+ static const char startcode_3[3] = {0, 0, 1};
+ DCHECK(avcc_buffer);
+ DCHECK(annexb_buffer);
+ size_t bytes_left = avcc_size;
+ while (bytes_left > 0) {
+ DCHECK_GT(bytes_left, sizeof(NalSizeType));
+ NalSizeType nal_size;
+ base::ReadBigEndian(avcc_buffer, &nal_size);
+ bytes_left -= sizeof(NalSizeType);
+ avcc_buffer += sizeof(NalSizeType);
+
+ DCHECK_GE(bytes_left, nal_size);
+ annexb_buffer->append(startcode_3, sizeof(startcode_3));
+ annexb_buffer->append(avcc_buffer, nal_size);
+ bytes_left -= nal_size;
+ avcc_buffer += nal_size;
+ }
+}
+
+// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
+// parameter sets for keyframes before the frame data as well.
+void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
+ std::string* annexb_buffer,
+ bool keyframe) {
+  // Perform two passes: one to figure out the total output size, and another to
+ // copy the data after having performed a single output allocation. Note that
+ // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
+ // video NALs.
+
+ OSStatus status;
+
+ // Get the sample buffer's block buffer and format description.
+ auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
+ DCHECK(bb);
+ auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
+ DCHECK(fdesc);
+
+ size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
+ size_t total_bytes = bb_size;
+
+ size_t pset_count;
+ int nal_size_field_bytes;
+ status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
+ if (status ==
+ CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
+ DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
+ pset_count = 2;
+ nal_size_field_bytes = 4;
+ } else if (status != noErr) {
+ DLOG(ERROR)
+ << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
+ << status;
+ return;
+ }
+
+ if (keyframe) {
+ const uint8_t* pset;
+ size_t pset_size;
+ for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
+ status =
+ CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
+ if (status != noErr) {
+ DLOG(ERROR)
+ << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
+ << status;
+ return;
+ }
+ total_bytes += pset_size + nal_size_field_bytes;
+ }
+ }
+
+ annexb_buffer->reserve(total_bytes);
+
+ // Copy all parameter sets before keyframes.
+ if (keyframe) {
+ const uint8_t* pset;
+ size_t pset_size;
+ for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
+ status =
+ CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
+ if (status != noErr) {
+ DLOG(ERROR)
+ << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
+ << status;
+ return;
+ }
+ static const char startcode_4[4] = {0, 0, 0, 1};
+ annexb_buffer->append(startcode_4, sizeof(startcode_4));
+ annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
+ }
+ }
+
+ // Block buffers can be composed of non-contiguous chunks. For the sake of
+ // keeping this code simple, flatten non-contiguous block buffers.
+ base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
+ bb, base::scoped_policy::RETAIN);
+ if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
+ contiguous_bb.reset();
+ status = CoreMediaGlue::CMBlockBufferCreateContiguous(
+ kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0,
+ contiguous_bb.InitializeInto());
+ if (status != noErr) {
+ DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
+ return;
+ }
+ }
+
+ // Copy all the NAL units. In the process convert them from AVCC format
+ // (length header) to AnnexB format (start code).
+ char* bb_data;
+ status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr,
+ nullptr, &bb_data);
+ if (status != noErr) {
+ DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
+ return;
+ }
+
+ if (nal_size_field_bytes == 1) {
+ CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
+ } else if (nal_size_field_bytes == 2) {
+ CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
+ } else if (nal_size_field_bytes == 4) {
+ CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
+ } else {
+ NOTREACHED();
+ }
+}
+
+} // namespace
+
+class H264VideoToolboxEncoder::VideoFrameFactoryImpl
+ : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>,
+ public VideoFrameFactory {
+ public:
+ // Type that proxies the VideoFrameFactory interface to this class.
+ class Proxy;
+
+ VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder,
+ const scoped_refptr<CastEnvironment>& cast_environment)
+ : encoder_(encoder), cast_environment_(cast_environment) {}
+
+ scoped_refptr<VideoFrame> MaybeCreateFrame(
+ const gfx::Size& frame_size,
+ base::TimeDelta timestamp) final {
+ if (frame_size.IsEmpty()) {
+ DVLOG(1) << "Rejecting empty video frame.";
+ return nullptr;
+ }
+
+ base::AutoLock auto_lock(lock_);
+
+ // If the pool size does not match, speculatively reset the encoder to use
+ // the new size and return null. Cache the new frame size right away and
+ // toss away the pixel buffer pool to avoid spurious tasks until the encoder
+ // is done resetting.
+ if (frame_size != pool_frame_size_) {
+ DVLOG(1) << "MaybeCreateFrame: Detected frame size change.";
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&H264VideoToolboxEncoder::UpdateFrameSize, encoder_,
+ frame_size));
+ pool_frame_size_ = frame_size;
+ pool_.reset();
+ return nullptr;
+ }
+
+ if (!pool_) {
+ DVLOG(1) << "MaybeCreateFrame: No pixel buffer pool.";
+ return nullptr;
+ }
+
+ // Allocate a pixel buffer from the pool and return a wrapper VideoFrame.
+ base::ScopedCFTypeRef<CVPixelBufferRef> buffer;
+ auto status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_,
+ buffer.InitializeInto());
+ if (status != kCVReturnSuccess) {
+ DLOG(ERROR) << "CVPixelBufferPoolCreatePixelBuffer failed: " << status;
+ return nullptr;
+ }
+
+ DCHECK(buffer);
+ return VideoFrame::WrapCVPixelBuffer(buffer, timestamp);
+ }
+
+ void Update(const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool,
+ const gfx::Size& frame_size) {
+ base::AutoLock auto_lock(lock_);
+ pool_ = pool;
+ pool_frame_size_ = frame_size;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<VideoFrameFactoryImpl>;
+ ~VideoFrameFactoryImpl() final {}
+
+ base::Lock lock_;
+ base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_;
+ gfx::Size pool_frame_size_;
+
+  // Weak back reference to the encoder and the cast environment so we can
+ // message the encoder when the frame size changes.
+ const base::WeakPtr<H264VideoToolboxEncoder> encoder_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
+};
+
+class H264VideoToolboxEncoder::VideoFrameFactoryImpl::Proxy
+ : public VideoFrameFactory {
+ public:
+ explicit Proxy(
+ const scoped_refptr<VideoFrameFactoryImpl>& video_frame_factory)
+ : video_frame_factory_(video_frame_factory) {
+ DCHECK(video_frame_factory_);
+ }
+
+ scoped_refptr<VideoFrame> MaybeCreateFrame(
+ const gfx::Size& frame_size,
+ base::TimeDelta timestamp) final {
+ return video_frame_factory_->MaybeCreateFrame(frame_size, timestamp);
+ }
+
+ private:
+ ~Proxy() final {}
+
+ const scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(Proxy);
+};
+
+// static
+bool H264VideoToolboxEncoder::IsSupported(
+ const VideoSenderConfig& video_config) {
+ return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get();
+}
+
+H264VideoToolboxEncoder::H264VideoToolboxEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb)
+ : cast_environment_(cast_environment),
+ videotoolbox_glue_(VideoToolboxGlue::Get()),
+ video_config_(video_config),
+ status_change_cb_(status_change_cb),
+ last_frame_id_(kStartFrameId),
+ encode_next_frame_as_keyframe_(false),
+ power_suspended_(false),
+ weak_factory_(this) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!status_change_cb_.is_null());
+
+ OperationalStatus operational_status =
+ H264VideoToolboxEncoder::IsSupported(video_config)
+ ? STATUS_INITIALIZED
+ : STATUS_UNSUPPORTED_CODEC;
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(status_change_cb_, operational_status));
+
+ if (operational_status == STATUS_INITIALIZED) {
+ // Create the shared video frame factory. It persists for the combined
+ // lifetime of the encoder and all video frame factory proxies created by
+ // |CreateVideoFrameFactory| that reference it.
+ video_frame_factory_ =
+ scoped_refptr<VideoFrameFactoryImpl>(new VideoFrameFactoryImpl(
+ weak_factory_.GetWeakPtr(), cast_environment_));
+
+ // Register for power state changes.
+ auto power_monitor = base::PowerMonitor::Get();
+ if (power_monitor) {
+ power_monitor->AddObserver(this);
+ VLOG(1) << "Registered for power state changes.";
+ } else {
+ DLOG(WARNING) << "No power monitor. Process suspension will invalidate "
+ "the encoder.";
+ }
+ }
+}
+
+H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
+ DestroyCompressionSession();
+
+ // If video_frame_factory_ is not null, the encoder registered for power state
+ // changes in the ctor and it must now unregister.
+ if (video_frame_factory_) {
+ auto power_monitor = base::PowerMonitor::Get();
+ if (power_monitor)
+ power_monitor->RemoveObserver(this);
+ }
+}
+
+void H264VideoToolboxEncoder::ResetCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Ignore reset requests while power suspended.
+ if (power_suspended_)
+ return;
+
+ // Notify that we're resetting the encoder.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING));
+
+ // Destroy the current session, if any.
+ DestroyCompressionSession();
+
+ // On OS X, allow the hardware encoder. Don't require it, it does not support
+ // all configurations (some of which are used for testing).
+ base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
+#if !defined(OS_IOS)
+ encoder_spec = DictionaryWithKeyValue(
+ videotoolbox_glue_
+ ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
+ kCFBooleanTrue);
+#endif
+
+ // Force 420v so that clients can easily use these buffers as GPU textures.
+ const int format[] = {
+ CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
+
+ // Keep these attachment settings in-sync with those in ConfigureSession().
+ CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey,
+ kCVImageBufferTransferFunctionKey,
+ kCVImageBufferYCbCrMatrixKey};
+ CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2,
+ kCVImageBufferTransferFunction_ITU_R_709_2,
+ kCVImageBufferYCbCrMatrix_ITU_R_709_2};
+ CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey,
+ kCVBufferPropagatedAttachmentsKey};
+ CFTypeRef buffer_attributes_values[] = {
+ ArrayWithIntegers(format, arraysize(format)).release(),
+ DictionaryWithKeysAndValues(attachments_keys, attachments_values,
+ arraysize(attachments_keys)).release()};
+ const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
+ DictionaryWithKeysAndValues(buffer_attributes_keys,
+ buffer_attributes_values,
+ arraysize(buffer_attributes_keys));
+ for (auto& v : buffer_attributes_values)
+ CFRelease(v);
+
+ // Create the compression session.
+
+ // Note that the encoder object is given to the compression session as the
+ // callback context using a raw pointer. The C API does not allow us to use a
+ // smart pointer, nor is this encoder ref counted. However, this is still
+  // safe, because 1) we own the compression session and 2) we tear it down
+ // safely. When destructing the encoder, the compression session is flushed
+ // and invalidated. Internally, VideoToolbox will join all of its threads
+ // before returning to the client. Therefore, when control returns to us, we
+ // are guaranteed that the output callback will not execute again.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
+ kCFAllocatorDefault, frame_size_.width(), frame_size_.height(),
+ CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes,
+ nullptr /* compressedDataAllocator */,
+ &H264VideoToolboxEncoder::CompressionCallback,
+ reinterpret_cast<void*>(this), compression_session_.InitializeInto());
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
+ // Notify that reinitialization has failed.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(status_change_cb_, STATUS_CODEC_INIT_FAILED));
+ return;
+ }
+
+ // Configure the session (apply session properties based on the current state
+ // of the encoder, experimental tuning and requirements).
+ ConfigureCompressionSession();
+
+ // Update the video frame factory.
+ base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool(
+ videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool(
+ compression_session_),
+ base::scoped_policy::RETAIN);
+ video_frame_factory_->Update(pool, frame_size_);
+
+ // Notify that reinitialization is done.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(status_change_cb_, STATUS_INITIALIZED));
+}
+
+void H264VideoToolboxEncoder::ConfigureCompressionSession() {
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
+ videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
+ SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
+ true);
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
+ false);
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
+ SetSessionProperty(
+ videotoolbox_glue_
+ ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
+ 240);
+ // TODO(jfroy): implement better bitrate control
+ // https://crbug.com/425352
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
+ (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
+ video_config_.max_frame_rate);
+ // Keep these attachment settings in-sync with those in Initialize().
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
+ kCVImageBufferColorPrimaries_ITU_R_709_2);
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
+ kCVImageBufferTransferFunction_ITU_R_709_2);
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
+ kCVImageBufferYCbCrMatrix_ITU_R_709_2);
+ if (video_config_.max_number_of_video_buffers_used > 0) {
+ SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
+ video_config_.max_number_of_video_buffers_used);
+ }
+}
+
+void H264VideoToolboxEncoder::DestroyCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If the compression session exists, invalidate it. This blocks until all
+ // pending output callbacks have returned and any internal threads have
+ // joined, ensuring no output callback ever sees a dangling encoder pointer.
+ //
+ // Before destroying the compression session, the video frame factory's pool
+ // is updated to null so that no thread will produce new video frames via the
+ // factory until a new compression session is created. The current frame size
+ // is passed to prevent the video frame factory from posting |UpdateFrameSize|
+ // tasks. Indeed, |DestroyCompressionSession| is either called from
+ // |ResetCompressionSession|, in which case a new pool and frame size will be
+ // set, or from callsites that require that there be no compression session
+ // (ex: the dtor).
+ if (compression_session_) {
+ video_frame_factory_->Update(
+ base::ScopedCFTypeRef<CVPixelBufferPoolRef>(nullptr), frame_size_);
+ videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
+ compression_session_.reset();
+ }
+}
+
+bool H264VideoToolboxEncoder::EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!frame_encoded_callback.is_null());
+
+ // Reject empty video frames.
+ const gfx::Size frame_size = video_frame->visible_rect().size();
+ if (frame_size.IsEmpty()) {
+ DVLOG(1) << "Rejecting empty video frame.";
+ return false;
+ }
+
+ // Handle frame size changes. This will reset the compression session.
+ if (frame_size != frame_size_) {
+ DVLOG(1) << "EncodeVideoFrame: Detected frame size change.";
+ UpdateFrameSize(frame_size);
+ }
+
+ // Need a compression session to continue.
+ if (!compression_session_) {
+ DLOG(ERROR) << "No compression session.";
+ return false;
+ }
+
+ // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be
+ // copied. If the VideoFrame was created by this encoder's video frame
+ // factory, then the returned CVPixelBuffer will have been obtained from the
+ // compression session's pixel buffer pool. This will eliminate a copy of the
+ // frame into memory visible by the hardware encoder. The VideoFrame's
+ // lifetime is extended for the lifetime of the returned CVPixelBuffer.
+ auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame);
+ if (!pixel_buffer) {
+ DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed.";
+ return false;
+ }
+
+ // Convert the frame timestamp to CMTime.
+ auto timestamp_cm = CoreMediaGlue::CMTimeMake(
+ (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
+
+ // Wrap information we'll need after the frame is encoded in a heap object.
+ // We'll get the pointer back from the VideoToolbox completion callback.
+ scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
+ TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency),
+ reference_time, frame_encoded_callback));
+
+ // Build a suitable frame properties dictionary for keyframes.
+ base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
+ if (encode_next_frame_as_keyframe_) {
+ frame_props = DictionaryWithKeyValue(
+ videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
+ kCFBooleanTrue);
+ encode_next_frame_as_keyframe_ = false;
+ }
+
+ // Submit the frame to the compression session. The function returns as soon
+ // as the frame has been enqueued.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
+ compression_session_, pixel_buffer, timestamp_cm,
+ CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
+ reinterpret_cast<void*>(request.release()), nullptr);
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
+ return false;
+ }
+
+ return true;
+}
+
+void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Our video frame factory posts a task to update the frame size when its
+ // cache of the frame size differs from what the client requested. To avoid
+ // spurious encoder resets, check again here.
+ if (size_needed == frame_size_) {
+ DCHECK(compression_session_);
+ return;
+ }
+
+ VLOG(1) << "Resetting compression session (for frame size change from "
+ << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
+
+ // If there is an existing session, finish every pending frame.
+ if (compression_session_) {
+ EmitFrames();
+ }
+
+ // Store the new frame size.
+ frame_size_ = size_needed;
+
+ // Reset the compression session.
+ ResetCompressionSession();
+}
+
+void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // VideoToolbox does not seem to support bitrate reconfiguration.
+}
+
+void H264VideoToolboxEncoder::GenerateKeyFrame() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ encode_next_frame_as_keyframe_ = true;
+}
+
+void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
+ // Not supported by VideoToolbox in any meaningful manner.
+}
+
+scoped_ptr<VideoFrameFactory>
+H264VideoToolboxEncoder::CreateVideoFrameFactory() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return scoped_ptr<VideoFrameFactory>(
+ new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
+}
+
+void H264VideoToolboxEncoder::EmitFrames() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!compression_session_)
+ return;
+
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionCompleteFrames(
+ compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionCompleteFrames failed: " << status;
+ }
+}
+
+void H264VideoToolboxEncoder::OnSuspend() {
+ VLOG(1)
+ << "OnSuspend: Emitting all frames and destroying compression session.";
+ EmitFrames();
+ DestroyCompressionSession();
+ power_suspended_ = true;
+}
+
+void H264VideoToolboxEncoder::OnResume() {
+ power_suspended_ = false;
+
+ // Reset the compression session only if the frame size is not zero (which
+ // will obviously fail). It is possible for the frame size to be zero if no
+ // frame was submitted for encoding or requested from the video frame factory
+ // before suspension.
+ if (!frame_size_.IsEmpty()) {
+ VLOG(1) << "OnResume: Resetting compression session.";
+ ResetCompressionSession();
+ }
+}
+
+bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
+ int32_t value) {
+ base::ScopedCFTypeRef<CFNumberRef> cfvalue(
+ CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
+ return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
+ cfvalue) == noErr;
+}
+
+bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
+ CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
+ return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
+ cfvalue) == noErr;
+}
+
+bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
+ CFStringRef value) {
+ return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
+ value) == noErr;
+}
+
+void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf) {
+ auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
+ const scoped_ptr<InProgressFrameEncode> request(
+ reinterpret_cast<InProgressFrameEncode*>(request_opaque));
+ bool keyframe = false;
+ bool has_frame_data = false;
+
+ if (status != noErr) {
+ DLOG(ERROR) << " encoding failed: " << status;
+ encoder->cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
+ } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
+ DVLOG(2) << " frame dropped";
+ } else {
+ auto sample_attachments =
+ static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
+ CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
+ 0));
+
+ // If the NotSync key is not present, it implies Sync, which indicates a
+ // keyframe (at least I think, VT documentation is, erm, sparse). Could
+ // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
+ keyframe = !CFDictionaryContainsKey(
+ sample_attachments,
+ CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
+ has_frame_data = true;
+ }
+
+ // Increment the encoder-scoped frame id and assign the new value to this
+ // frame. VideoToolbox calls the output callback serially, so this is safe.
+ const uint32 frame_id = ++encoder->last_frame_id_;
+
+ scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
+ encoded_frame->frame_id = frame_id;
+ encoded_frame->reference_time = request->reference_time;
+ encoded_frame->rtp_timestamp = request->rtp_timestamp;
+ if (keyframe) {
+ encoded_frame->dependency = EncodedFrame::KEY;
+ encoded_frame->referenced_frame_id = frame_id;
+ } else {
+ encoded_frame->dependency = EncodedFrame::DEPENDENT;
+ // H.264 supports complex frame reference schemes (multiple reference
+ // frames, slice references, backward and forward references, etc). Cast
+ // doesn't support the concept of forward-referencing frame dependencies or
+ // multiple frame dependencies; so pretend that all frames are only
+ // decodable after their immediately preceding frame is decoded. This will
+ // ensure a Cast receiver only attempts to decode the frames sequentially
+ // and in order. Furthermore, the encoder is configured to never use forward
+ // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
+ // is no way to prevent multiple reference frames.
+ encoded_frame->referenced_frame_id = frame_id - 1;
+ }
+
+ if (has_frame_data)
+ CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
+
+ encoder->cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(request->frame_encoded_callback,
+ base::Passed(&encoded_frame)));
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/h264_vt_encoder.h b/chromium/media/cast/sender/h264_vt_encoder.h
new file mode 100644
index 00000000000..1f51c230f8c
--- /dev/null
+++ b/chromium/media/cast/sender/h264_vt_encoder.h
@@ -0,0 +1,133 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
+#define MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
+
+#include "base/mac/scoped_cftyperef.h"
+#include "base/power_monitor/power_observer.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/mac/videotoolbox_glue.h"
+#include "media/cast/sender/size_adaptable_video_encoder_base.h"
+#include "media/cast/sender/video_encoder.h"
+
+namespace media {
+namespace cast {
+
+// VideoToolbox implementation of the media::cast::VideoEncoder interface.
+// VideoToolbox makes no guarantees that it is thread safe, so this object is
+// pinned to the thread on which it is constructed. Supports changing frame
+// sizes directly. Implements the base::PowerObserver interface to reset the
+// compression session when the host process is suspended.
+class H264VideoToolboxEncoder : public VideoEncoder,
+ public base::PowerObserver {
+ typedef CoreMediaGlue::CMSampleBufferRef CMSampleBufferRef;
+ typedef VideoToolboxGlue::VTCompressionSessionRef VTCompressionSessionRef;
+ typedef VideoToolboxGlue::VTEncodeInfoFlags VTEncodeInfoFlags;
+
+ public:
+ // Returns true if the current platform and system configuration supports
+ // using H264VideoToolboxEncoder with the given |video_config|.
+ static bool IsSupported(const VideoSenderConfig& video_config);
+
+ H264VideoToolboxEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb);
+ ~H264VideoToolboxEncoder() final;
+
+ // media::cast::VideoEncoder implementation
+ bool EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) final;
+ void SetBitRate(int new_bit_rate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
+ scoped_ptr<VideoFrameFactory> CreateVideoFrameFactory() final;
+ void EmitFrames() final;
+
+ // base::PowerObserver
+ void OnSuspend() final;
+ void OnResume() final;
+
+ private:
+ // VideoFrameFactory tied to the VideoToolbox encoder.
+ class VideoFrameFactoryImpl;
+
+ // Reset the encoder's compression session by destroying the existing one
+ // using DestroyCompressionSession() and creating a new one. The new session
+ // is configured using ConfigureCompressionSession().
+ void ResetCompressionSession();
+
+ // Configure the current compression session using current encoder settings.
+ void ConfigureCompressionSession();
+
+ // Destroy the current compression session if any. Blocks until all pending
+ // frames have been flushed out (similar to EmitFrames without doing any
+ // encoding work).
+ void DestroyCompressionSession();
+
+ // Update the encoder's target frame size by resetting the compression
+ // session. This will also update the video frame factory.
+ void UpdateFrameSize(const gfx::Size& size_needed);
+
+ // Set a compression session property.
+ bool SetSessionProperty(CFStringRef key, int32_t value);
+ bool SetSessionProperty(CFStringRef key, bool value);
+ bool SetSessionProperty(CFStringRef key, CFStringRef value);
+
+ // Compression session callback function to handle compressed frames.
+ static void CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf);
+
+ // The cast environment (contains worker threads & more).
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // VideoToolboxGlue provides access to VideoToolbox at runtime.
+ const VideoToolboxGlue* const videotoolbox_glue_;
+
+ // VideoSenderConfig copy so we can create compression sessions on demand.
+ // This is needed to recover from backgrounding and other events that can
+ // invalidate compression sessions.
+ const VideoSenderConfig video_config_;
+
+ // Frame size of the current compression session. Can be changed by submitting
+ // a frame of a different size, which will cause a compression session reset.
+ gfx::Size frame_size_;
+
+ // Callback used to report initialization status and runtime errors.
+ const StatusChangeCallback status_change_cb_;
+
+ // Thread checker to enforce that this object is used on a specific thread.
+ base::ThreadChecker thread_checker_;
+
+ // The compression session.
+ base::ScopedCFTypeRef<VTCompressionSessionRef> compression_session_;
+
+ // Video frame factory tied to the encoder.
+ scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
+
+ // The ID of the last frame that was emitted.
+ uint32 last_frame_id_;
+
+ // Force next frame to be a keyframe.
+ bool encode_next_frame_as_keyframe_;
+
+ // Power suspension state.
+ bool power_suspended_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<H264VideoToolboxEncoder> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264VideoToolboxEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
diff --git a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
new file mode 100644
index 00000000000..fb07577d171
--- /dev/null
+++ b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
@@ -0,0 +1,413 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <queue>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/power_monitor_test_base.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/test/test_suite.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/media.h"
+#include "media/base/media_switches.h"
+#include "media/cast/sender/h264_vt_encoder.h"
+#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/ffmpeg_glue.h"
+#include "media/filters/ffmpeg_video_decoder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int kVideoWidth = 1280;
+const int kVideoHeight = 720;
+
+class MediaTestSuite : public base::TestSuite {
+ public:
+ MediaTestSuite(int argc, char** argv) : TestSuite(argc, argv) {}
+ ~MediaTestSuite() final {}
+
+ protected:
+ void Initialize() final;
+};
+
+void MediaTestSuite::Initialize() {
+ base::TestSuite::Initialize();
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+ command_line->AppendSwitch(switches::kEnableInbandTextTracks);
+ media::InitializeMediaLibraryForTesting();
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ {
+ base::AtExitManager at_exit_manager;
+ CHECK(VideoToolboxGlue::Get())
+ << "VideoToolbox is not available. Requires OS X 10.8 or iOS 8.0.";
+ }
+ MediaTestSuite test_suite(argc, argv);
+ return base::LaunchUnitTests(
+ argc, argv,
+ base::Bind(&MediaTestSuite::Run, base::Unretained(&test_suite)));
+}
+
+namespace media {
+namespace cast {
+
+// See comment in end2end_unittest.cc for details on this value.
+const double kVideoAcceptedPSNR = 38.0;
+
+void SavePipelineStatus(PipelineStatus* out_status, PipelineStatus in_status) {
+ *out_status = in_status;
+}
+
+void SaveOperationalStatus(OperationalStatus* out_status,
+ OperationalStatus in_status) {
+ *out_status = in_status;
+}
+
+class MetadataRecorder : public base::RefCountedThreadSafe<MetadataRecorder> {
+ public:
+ MetadataRecorder() : count_frames_delivered_(0) {}
+
+ int count_frames_delivered() const { return count_frames_delivered_; }
+
+ void PushExpectation(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
+ uint32 expected_rtp_timestamp,
+ const base::TimeTicks& expected_reference_time) {
+ expectations_.push(Expectation{expected_frame_id,
+ expected_last_referenced_frame_id,
+ expected_rtp_timestamp,
+ expected_reference_time});
+ }
+
+ void CompareFrameWithExpected(scoped_ptr<EncodedFrame> encoded_frame) {
+ ASSERT_LT(0u, expectations_.size());
+ auto e = expectations_.front();
+ expectations_.pop();
+ if (e.expected_frame_id != e.expected_last_referenced_frame_id) {
+ EXPECT_EQ(EncodedFrame::DEPENDENT, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
+ }
+ EXPECT_EQ(e.expected_frame_id, encoded_frame->frame_id);
+ EXPECT_EQ(e.expected_last_referenced_frame_id,
+ encoded_frame->referenced_frame_id)
+ << "frame id: " << e.expected_frame_id;
+ EXPECT_EQ(e.expected_rtp_timestamp, encoded_frame->rtp_timestamp);
+ EXPECT_EQ(e.expected_reference_time, encoded_frame->reference_time);
+ EXPECT_FALSE(encoded_frame->data.empty());
+ ++count_frames_delivered_;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<MetadataRecorder>;
+ virtual ~MetadataRecorder() {}
+
+ int count_frames_delivered_;
+
+ struct Expectation {
+ uint32 expected_frame_id;
+ uint32 expected_last_referenced_frame_id;
+ uint32 expected_rtp_timestamp;
+ base::TimeTicks expected_reference_time;
+ };
+ std::queue<Expectation> expectations_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetadataRecorder);
+};
+
+class EndToEndFrameChecker
+ : public base::RefCountedThreadSafe<EndToEndFrameChecker> {
+ public:
+ explicit EndToEndFrameChecker(const VideoDecoderConfig& config)
+ : decoder_(base::MessageLoop::current()->task_runner()),
+ count_frames_checked_(0) {
+ PipelineStatus pipeline_status;
+ decoder_.Initialize(
+ config, false, base::Bind(&SavePipelineStatus, &pipeline_status),
+ base::Bind(&EndToEndFrameChecker::CompareFrameWithExpected,
+ base::Unretained(this)));
+ base::MessageLoop::current()->RunUntilIdle();
+ EXPECT_EQ(PIPELINE_OK, pipeline_status);
+ }
+
+ void PushExpectation(const scoped_refptr<VideoFrame>& frame) {
+ expectations_.push(frame);
+ }
+
+ void EncodeDone(scoped_ptr<EncodedFrame> encoded_frame) {
+ auto buffer = DecoderBuffer::CopyFrom(encoded_frame->bytes(),
+ encoded_frame->data.size());
+ decoder_.Decode(buffer, base::Bind(&EndToEndFrameChecker::DecodeDone,
+ base::Unretained(this)));
+ }
+
+ void CompareFrameWithExpected(const scoped_refptr<VideoFrame>& frame) {
+ ASSERT_LT(0u, expectations_.size());
+ auto& e = expectations_.front();
+ expectations_.pop();
+ EXPECT_LE(kVideoAcceptedPSNR, I420PSNR(e, frame));
+ ++count_frames_checked_;
+ }
+
+ void DecodeDone(VideoDecoder::Status status) {
+ EXPECT_EQ(VideoDecoder::kOk, status);
+ }
+
+ int count_frames_checked() const { return count_frames_checked_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<EndToEndFrameChecker>;
+ virtual ~EndToEndFrameChecker() {}
+
+ FFmpegVideoDecoder decoder_;
+ std::queue<scoped_refptr<VideoFrame>> expectations_;
+ int count_frames_checked_;
+
+ DISALLOW_COPY_AND_ASSIGN(EndToEndFrameChecker);
+};
+
+void CreateFrameAndMemsetPlane(VideoFrameFactory* const video_frame_factory) {
+ const scoped_refptr<media::VideoFrame> video_frame =
+ video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta());
+ ASSERT_TRUE(video_frame.get());
+ auto cv_pixel_buffer = video_frame->cv_pixel_buffer();
+ ASSERT_TRUE(cv_pixel_buffer);
+ CVPixelBufferLockBaseAddress(cv_pixel_buffer, 0);
+ auto ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0);
+ ASSERT_TRUE(ptr);
+ memset(ptr, 0xfe, CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) *
+ CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0));
+ CVPixelBufferUnlockBaseAddress(cv_pixel_buffer, 0);
+}
+
+void NoopFrameEncodedCallback(
+ scoped_ptr<media::cast::EncodedFrame> /*encoded_frame*/) {
+}
+
+class TestPowerSource : public base::PowerMonitorSource {
+ public:
+ void GenerateSuspendEvent() {
+ ProcessPowerEvent(SUSPEND_EVENT);
+ base::MessageLoop::current()->RunUntilIdle();
+ }
+ void GenerateResumeEvent() {
+ ProcessPowerEvent(RESUME_EVENT);
+ base::MessageLoop::current()->RunUntilIdle();
+ }
+
+ private:
+ bool IsOnBatteryPowerImpl() final { return false; }
+};
+
+class H264VideoToolboxEncoderTest : public ::testing::Test {
+ protected:
+ H264VideoToolboxEncoderTest() = default;
+
+ void SetUp() final {
+ clock_ = new base::SimpleTestTickClock();
+ clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+
+ power_source_ = new TestPowerSource();
+ power_monitor_.reset(
+ new base::PowerMonitor(scoped_ptr<TestPowerSource>(power_source_)));
+
+ cast_environment_ = new CastEnvironment(
+ scoped_ptr<base::TickClock>(clock_).Pass(),
+ message_loop_.message_loop_proxy(), message_loop_.message_loop_proxy(),
+ message_loop_.message_loop_proxy());
+ encoder_.reset(new H264VideoToolboxEncoder(
+ cast_environment_, video_sender_config_,
+ base::Bind(&SaveOperationalStatus, &operational_status_)));
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(STATUS_INITIALIZED, operational_status_);
+ }
+
+ void TearDown() final {
+ encoder_.reset();
+ message_loop_.RunUntilIdle();
+ power_monitor_.reset();
+ }
+
+ void AdvanceClockAndVideoFrameTimestamp() {
+ clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ frame_->set_timestamp(frame_->timestamp() +
+ base::TimeDelta::FromMilliseconds(33));
+ }
+
+ static void SetUpTestCase() {
+ // Reusable test data.
+ video_sender_config_ = GetDefaultVideoSenderConfig();
+ video_sender_config_.codec = CODEC_VIDEO_H264;
+ const gfx::Size size(kVideoWidth, kVideoHeight);
+ frame_ = media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(frame_.get(), 123);
+ }
+
+ static void TearDownTestCase() { frame_ = nullptr; }
+
+ static scoped_refptr<media::VideoFrame> frame_;
+ static VideoSenderConfig video_sender_config_;
+
+ base::SimpleTestTickClock* clock_; // Owned by CastEnvironment.
+ base::MessageLoop message_loop_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<VideoEncoder> encoder_;
+ OperationalStatus operational_status_;
+ TestPowerSource* power_source_; // Owned by the power monitor.
+ scoped_ptr<base::PowerMonitor> power_monitor_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(H264VideoToolboxEncoderTest);
+};
+
+// static
+scoped_refptr<media::VideoFrame> H264VideoToolboxEncoderTest::frame_;
+VideoSenderConfig H264VideoToolboxEncoderTest::video_sender_config_;
+
+TEST_F(H264VideoToolboxEncoderTest, CheckFrameMetadataSequence) {
+ scoped_refptr<MetadataRecorder> metadata_recorder(new MetadataRecorder());
+ VideoEncoder::FrameEncodedCallback cb = base::Bind(
+ &MetadataRecorder::CompareFrameWithExpected, metadata_recorder.get());
+
+ metadata_recorder->PushExpectation(
+ 0, 0, TimeDeltaToRtpDelta(frame_->timestamp(), kVideoFrequency),
+ clock_->NowTicks());
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ message_loop_.RunUntilIdle();
+
+ for (uint32 frame_id = 1; frame_id < 10; ++frame_id) {
+ AdvanceClockAndVideoFrameTimestamp();
+ metadata_recorder->PushExpectation(
+ frame_id, frame_id - 1,
+ TimeDeltaToRtpDelta(frame_->timestamp(), kVideoFrequency),
+ clock_->NowTicks());
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ }
+
+ encoder_.reset();
+ message_loop_.RunUntilIdle();
+
+ EXPECT_EQ(10, metadata_recorder->count_frames_delivered());
+}
+
+#if defined(USE_PROPRIETARY_CODECS)
+TEST_F(H264VideoToolboxEncoderTest, CheckFramesAreDecodable) {
+ VideoDecoderConfig config(kCodecH264, H264PROFILE_MAIN, frame_->format(),
+ frame_->coded_size(), frame_->visible_rect(),
+ frame_->natural_size(), nullptr, 0, false);
+ scoped_refptr<EndToEndFrameChecker> checker(new EndToEndFrameChecker(config));
+
+ VideoEncoder::FrameEncodedCallback cb =
+ base::Bind(&EndToEndFrameChecker::EncodeDone, checker.get());
+ for (uint32 frame_id = 0; frame_id < 6; ++frame_id) {
+ checker->PushExpectation(frame_);
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ AdvanceClockAndVideoFrameTimestamp();
+ }
+
+ encoder_.reset();
+ message_loop_.RunUntilIdle();
+
+ EXPECT_EQ(5, checker->count_frames_checked());
+}
+#endif // defined(USE_PROPRIETARY_CODECS)
+
+TEST_F(H264VideoToolboxEncoderTest, CheckVideoFrameFactory) {
+ auto video_frame_factory = encoder_->CreateVideoFrameFactory();
+ ASSERT_TRUE(video_frame_factory.get());
+ // The first call to |MaybeCreateFrame| will return null but post a task to
+ // the encoder to initialize for the specified frame size. We then drain the
+ // message loop. After that, the encoder should have initialized and we
+ // request a frame again.
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+ message_loop_.RunUntilIdle();
+ CreateFrameAndMemsetPlane(video_frame_factory.get());
+}
+
+TEST_F(H264VideoToolboxEncoderTest, CheckPowerMonitoring) {
+ // Encode a frame, suspend, encode a frame, resume, encode a frame.
+
+ VideoEncoder::FrameEncodedCallback cb = base::Bind(&NoopFrameEncodedCallback);
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ power_source_->GenerateSuspendEvent();
+ EXPECT_FALSE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ power_source_->GenerateResumeEvent();
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+}
+
+TEST_F(H264VideoToolboxEncoderTest, CheckPowerMonitoringNoInitialFrame) {
+ // Suspend, encode a frame, resume, encode a frame.
+
+ VideoEncoder::FrameEncodedCallback cb = base::Bind(&NoopFrameEncodedCallback);
+ power_source_->GenerateSuspendEvent();
+ EXPECT_FALSE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+ power_source_->GenerateResumeEvent();
+ EXPECT_TRUE(encoder_->EncodeVideoFrame(frame_, clock_->NowTicks(), cb));
+}
+
+TEST_F(H264VideoToolboxEncoderTest, CheckPowerMonitoringVideoFrameFactory) {
+ VideoEncoder::FrameEncodedCallback cb = base::Bind(&NoopFrameEncodedCallback);
+ auto video_frame_factory = encoder_->CreateVideoFrameFactory();
+ ASSERT_TRUE(video_frame_factory.get());
+
+ // The first call to |MaybeCreateFrame| will return null but post a task to
+ // the encoder to initialize for the specified frame size. We then drain the
+ // message loop. After that, the encoder should have initialized and we
+ // request a frame again.
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+ message_loop_.RunUntilIdle();
+ CreateFrameAndMemsetPlane(video_frame_factory.get());
+
+ // After a power suspension, the factory should not produce frames.
+ power_source_->GenerateSuspendEvent();
+
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+ message_loop_.RunUntilIdle();
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+
+ // After a power resume event, the factory should produce frames right away
+ // because the encoder re-initializes on its own.
+ power_source_->GenerateResumeEvent();
+ CreateFrameAndMemsetPlane(video_frame_factory.get());
+}
+
+TEST_F(H264VideoToolboxEncoderTest,
+ CheckPowerMonitoringVideoFrameFactoryNoInitialFrame) {
+ VideoEncoder::FrameEncodedCallback cb = base::Bind(&NoopFrameEncodedCallback);
+ auto video_frame_factory = encoder_->CreateVideoFrameFactory();
+ ASSERT_TRUE(video_frame_factory.get());
+
+ // After a power suspension, the factory should not produce frames.
+ power_source_->GenerateSuspendEvent();
+
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+ message_loop_.RunUntilIdle();
+ ASSERT_FALSE(video_frame_factory->MaybeCreateFrame(
+ gfx::Size(kVideoWidth, kVideoHeight), base::TimeDelta()));
+
+ // After a power resume event, the factory should produce frames right away
+ // because the encoder re-initializes on its own.
+ power_source_->GenerateResumeEvent();
+ CreateFrameAndMemsetPlane(video_frame_factory.get());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc b/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
new file mode 100644
index 00000000000..96c65a0d505
--- /dev/null
+++ b/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
@@ -0,0 +1,168 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/size_adaptable_video_encoder_base.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+namespace cast {
+
+SizeAdaptableVideoEncoderBase::SizeAdaptableVideoEncoderBase(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb)
+ : cast_environment_(cast_environment),
+ video_config_(video_config),
+ status_change_cb_(status_change_cb),
+ frames_in_encoder_(0),
+ last_frame_id_(kStartFrameId),
+ weak_factory_(this) {
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb_, STATUS_INITIALIZED));
+}
+
+SizeAdaptableVideoEncoderBase::~SizeAdaptableVideoEncoderBase() {
+ DestroyEncoder();
+}
+
+bool SizeAdaptableVideoEncoderBase::EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ const gfx::Size frame_size = video_frame->visible_rect().size();
+ if (frame_size.IsEmpty()) {
+ DVLOG(1) << "Rejecting empty video frame.";
+ return false;
+ }
+ if (frames_in_encoder_ == kEncoderIsInitializing) {
+ VLOG(1) << "Dropping frame since encoder initialization is in-progress.";
+ return false;
+ }
+ if (frame_size != frame_size_ || !encoder_) {
+ VLOG(1) << "Dropping this frame, and future frames until a replacement "
+ "encoder is spun-up to handle size " << frame_size.ToString();
+ TrySpawningReplacementEncoder(frame_size);
+ return false;
+ }
+
+ const bool is_frame_accepted = encoder_->EncodeVideoFrame(
+ video_frame,
+ reference_time,
+ base::Bind(&SizeAdaptableVideoEncoderBase::OnEncodedVideoFrame,
+ weak_factory_.GetWeakPtr(),
+ frame_encoded_callback));
+ if (is_frame_accepted)
+ ++frames_in_encoder_;
+ return is_frame_accepted;
+}
+
+void SizeAdaptableVideoEncoderBase::SetBitRate(int new_bit_rate) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ video_config_.start_bitrate = new_bit_rate;
+ if (encoder_)
+ encoder_->SetBitRate(new_bit_rate);
+}
+
+void SizeAdaptableVideoEncoderBase::GenerateKeyFrame() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (encoder_)
+ encoder_->GenerateKeyFrame();
+}
+
+void SizeAdaptableVideoEncoderBase::LatestFrameIdToReference(uint32 frame_id) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (encoder_)
+ encoder_->LatestFrameIdToReference(frame_id);
+}
+
+scoped_ptr<VideoFrameFactory>
+ SizeAdaptableVideoEncoderBase::CreateVideoFrameFactory() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ return nullptr;
+}
+
+void SizeAdaptableVideoEncoderBase::EmitFrames() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (encoder_)
+ encoder_->EmitFrames();
+}
+
+StatusChangeCallback
+ SizeAdaptableVideoEncoderBase::CreateEncoderStatusChangeCallback() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ return base::Bind(&SizeAdaptableVideoEncoderBase::OnEncoderStatusChange,
+ weak_factory_.GetWeakPtr());
+}
+
+void SizeAdaptableVideoEncoderBase::OnEncoderReplaced(
+ VideoEncoder* replacement_encoder) {}
+
+void SizeAdaptableVideoEncoderBase::DestroyEncoder() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // The weak pointers are invalidated to prevent future calls back to |this|.
+ // This effectively cancels any of |encoder_|'s posted tasks that have not yet
+ // run.
+ weak_factory_.InvalidateWeakPtrs();
+ encoder_.reset();
+}
+
+void SizeAdaptableVideoEncoderBase::TrySpawningReplacementEncoder(
+ const gfx::Size& size_needed) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // If prior frames are still encoding in the current encoder, let them finish
+ // first.
+ if (frames_in_encoder_ > 0) {
+ encoder_->EmitFrames();
+ // Check again, since EmitFrames() is a synchronous operation for some
+ // encoders.
+ if (frames_in_encoder_ > 0)
+ return;
+ }
+
+ if (frames_in_encoder_ == kEncoderIsInitializing)
+ return; // Already spawned.
+
+ DestroyEncoder();
+ frames_in_encoder_ = kEncoderIsInitializing;
+ OnEncoderStatusChange(STATUS_CODEC_REINIT_PENDING);
+ VLOG(1) << "Creating replacement video encoder (for frame size change from "
+ << frame_size_.ToString() << " to "
+ << size_needed.ToString() << ").";
+ frame_size_ = size_needed;
+ encoder_ = CreateEncoder().Pass();
+ DCHECK(encoder_);
+}
+
+void SizeAdaptableVideoEncoderBase::OnEncoderStatusChange(
+ OperationalStatus status) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (frames_in_encoder_ == kEncoderIsInitializing &&
+ status == STATUS_INITIALIZED) {
+ // Begin using the replacement encoder.
+ frames_in_encoder_ = 0;
+ OnEncoderReplaced(encoder_.get());
+ }
+ status_change_cb_.Run(status);
+}
+
+void SizeAdaptableVideoEncoderBase::OnEncodedVideoFrame(
+ const FrameEncodedCallback& frame_encoded_callback,
+ scoped_ptr<EncodedFrame> encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ --frames_in_encoder_;
+ DCHECK_GE(frames_in_encoder_, 0);
+ last_frame_id_ = encoded_frame->frame_id;
+ frame_encoded_callback.Run(encoded_frame.Pass());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.h b/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
new file mode 100644
index 00000000000..7fc29211eb4
--- /dev/null
+++ b/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
@@ -0,0 +1,120 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
+#define MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/sender/video_encoder.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+namespace cast {
+
+// Creates and owns a VideoEncoder instance. The owned instance is an
+// implementation that does not support changing frame sizes, and so
+// SizeAdaptableVideoEncoderBase acts as a proxy to automatically detect when
+// the owned instance should be replaced with one that can handle the new frame
+// size.
+class SizeAdaptableVideoEncoderBase : public VideoEncoder {
+ public:
+ SizeAdaptableVideoEncoderBase(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb);
+
+ ~SizeAdaptableVideoEncoderBase() override;
+
+ // VideoEncoder implementation.
+ bool EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) final;
+ void SetBitRate(int new_bit_rate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
+ scoped_ptr<VideoFrameFactory> CreateVideoFrameFactory() final;
+ void EmitFrames() final;
+
+ protected:
+ // Accessors for subclasses.
+ CastEnvironment* cast_environment() const {
+ return cast_environment_.get();
+ }
+ const VideoSenderConfig& video_config() const {
+ return video_config_;
+ }
+ const gfx::Size& frame_size() const {
+ return frame_size_;
+ }
+ uint32 last_frame_id() const {
+ return last_frame_id_;
+ }
+
+ // Returns a callback that calls OnEncoderStatusChange(). The callback is
+ // canceled by invalidating its bound weak pointer just before a replacement
+ // encoder is instantiated. In this scheme, OnEncoderStatusChange() can only
+ // be called by the most-recent encoder.
+ StatusChangeCallback CreateEncoderStatusChangeCallback();
+
+ // Overridden by subclasses to create a new encoder instance that handles
+ // frames of the size specified by |frame_size()|.
+ virtual scoped_ptr<VideoEncoder> CreateEncoder() = 0;
+
+ // Overridden by subclasses to perform additional steps when
+ // |replacement_encoder| becomes the active encoder.
+ virtual void OnEncoderReplaced(VideoEncoder* replacement_encoder);
+
+ // Overridden by subclasses to perform additional steps before/after the
+ // current encoder is destroyed.
+ virtual void DestroyEncoder();
+
+ private:
+ // Create and initialize a replacement video encoder, if this not already
+ // in-progress. The replacement will call back to OnEncoderStatusChange()
+ // with success/fail status.
+ void TrySpawningReplacementEncoder(const gfx::Size& size_needed);
+
+ // Called when a status change is received from an encoder.
+ void OnEncoderStatusChange(OperationalStatus status);
+
+ // Called by the |encoder_| with the next EncodedFrame.
+ void OnEncodedVideoFrame(const FrameEncodedCallback& frame_encoded_callback,
+ scoped_ptr<EncodedFrame> encoded_frame);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // This is not const since |video_config_.starting_bitrate| is modified by
+ // SetBitRate(), for when a replacement encoder is spawned.
+ VideoSenderConfig video_config_;
+
+ // Run whenever the underlying encoder reports a status change.
+ const StatusChangeCallback status_change_cb_;
+
+ // The underlying platform video encoder and the frame size it expects.
+ scoped_ptr<VideoEncoder> encoder_;
+ gfx::Size frame_size_;
+
+ // The number of frames in |encoder_|'s pipeline. If this is set to
+ // kEncoderIsInitializing, |encoder_| is not yet ready to accept frames.
+ enum { kEncoderIsInitializing = -1 };
+ int frames_in_encoder_;
+
+ // The ID of the last frame that was emitted from |encoder_|.
+ uint32 last_frame_id_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<SizeAdaptableVideoEncoderBase> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SizeAdaptableVideoEncoderBase);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
diff --git a/chromium/media/cast/sender/video_encoder.cc b/chromium/media/cast/sender/video_encoder.cc
new file mode 100644
index 00000000000..33c15c48912
--- /dev/null
+++ b/chromium/media/cast/sender/video_encoder.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/video_encoder.h"
+
+#include "media/cast/sender/external_video_encoder.h"
+#include "media/cast/sender/video_encoder_impl.h"
+
+#if defined(OS_MACOSX)
+#include "media/cast/sender/h264_vt_encoder.h"
+#endif
+
+namespace media {
+namespace cast {
+
+// static
+scoped_ptr<VideoEncoder> VideoEncoder::Create(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb) {
+ // On MacOS or IOS, attempt to use the system VideoToolbox library to
+ // perform optimized H.264 encoding.
+#if defined(OS_MACOSX) || defined(OS_IOS)
+ if (!video_config.use_external_encoder &&
+ H264VideoToolboxEncoder::IsSupported(video_config)) {
+ return scoped_ptr<VideoEncoder>(new H264VideoToolboxEncoder(
+ cast_environment, video_config, status_change_cb));
+ }
+#endif // defined(OS_MACOSX) || defined(OS_IOS)
+
+#if !defined(OS_IOS)
+ // If the system provides a hardware-accelerated encoder, use it.
+ if (ExternalVideoEncoder::IsSupported(video_config)) {
+ return scoped_ptr<VideoEncoder>(new SizeAdaptableExternalVideoEncoder(
+ cast_environment,
+ video_config,
+ status_change_cb,
+ create_vea_cb,
+ create_video_encode_memory_cb));
+ }
+
+ // Attempt to use the software encoder implementation.
+ if (VideoEncoderImpl::IsSupported(video_config)) {
+ return scoped_ptr<VideoEncoder>(new VideoEncoderImpl(
+ cast_environment,
+ video_config,
+ status_change_cb));
+ }
+#endif // !defined(OS_IOS)
+
+ // No encoder implementation will suffice.
+ return nullptr;
+}
+
+scoped_ptr<VideoFrameFactory> VideoEncoder::CreateVideoFrameFactory() {
+ return nullptr;
+}
+
+void VideoEncoder::EmitFrames() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/video_encoder.h b/chromium/media/cast/sender/video_encoder.h
index b3bdbe44f0b..94b0bbfe3c0 100644
--- a/chromium/media/cast/sender/video_encoder.h
+++ b/chromium/media/cast/sender/video_encoder.h
@@ -8,11 +8,11 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/sender/video_frame_factory.h"
namespace media {
namespace cast {
@@ -22,6 +22,22 @@ class VideoEncoder {
public:
typedef base::Callback<void(scoped_ptr<EncodedFrame>)> FrameEncodedCallback;
+ // Creates a VideoEncoder instance from the given |video_config| and based on
+ // the current platform's hardware/library support; or null if no
+ // implementation will suffice. The instance will run |status_change_cb| at
+ // some point in the future to indicate initialization success/failure.
+ //
+ // All VideoEncoder instances returned by this function support encoding
+ // sequences of differently-sized VideoFrames.
+ //
+ // TODO(miu): Remove the CreateVEA callbacks. http://crbug.com/454029
+ static scoped_ptr<VideoEncoder> Create(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_memory_cb);
+
virtual ~VideoEncoder() {}
// If true is returned, the Encoder has accepted the request and will process
@@ -41,6 +57,19 @@ class VideoEncoder {
// Inform the encoder to only reference frames older or equal to frame_id;
virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
+
+ // Creates a |VideoFrameFactory| object to vend |VideoFrame| object with
+ // encoder affinity (defined as offering some sort of performance benefit).
+ // This is an optional capability and by default returns null.
+ virtual scoped_ptr<VideoFrameFactory> CreateVideoFrameFactory();
+
+ // Instructs the encoder to finish and emit all frames that have been
+ // submitted for encoding. An encoder may hold a certain number of frames for
+ // analysis. Under certain network conditions, particularly when there is
+ // network congestion, it is necessary to flush out of the encoder all
+ // submitted frames so that eventually new frames may be encoded. Like
+ // EncodeVideoFrame(), the encoder will process this request asynchronously.
+ virtual void EmitFrames();
};
} // namespace cast
diff --git a/chromium/media/cast/sender/video_encoder_impl.cc b/chromium/media/cast/sender/video_encoder_impl.cc
index a54ddfc7efd..84f700e2cf9 100644
--- a/chromium/media/cast/sender/video_encoder_impl.cc
+++ b/chromium/media/cast/sender/video_encoder_impl.cc
@@ -52,10 +52,23 @@ void EncodeVideoFrameOnEncoderThread(
}
} // namespace
+// static
+bool VideoEncoderImpl::IsSupported(const VideoSenderConfig& video_config) {
+#ifndef OFFICIAL_BUILD
+ if (video_config.codec == CODEC_VIDEO_FAKE)
+ return true;
+#endif
+ return video_config.codec == CODEC_VIDEO_VP8;
+}
+
VideoEncoderImpl::VideoEncoderImpl(
scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config)
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb)
: cast_environment_(cast_environment) {
+ CHECK(cast_environment_->HasVideoThread());
+ DCHECK(!status_change_cb.is_null());
+
if (video_config.codec == CODEC_VIDEO_VP8) {
encoder_.reset(new Vp8Encoder(video_config));
cast_environment_->PostTask(CastEnvironment::VIDEO,
@@ -74,6 +87,13 @@ VideoEncoderImpl::VideoEncoderImpl(
dynamic_config_.key_frame_requested = false;
dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
dynamic_config_.bit_rate = video_config.start_bitrate;
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(status_change_cb,
+ encoder_.get() ? STATUS_INITIALIZED :
+ STATUS_UNSUPPORTED_CODEC));
}
VideoEncoderImpl::~VideoEncoderImpl() {
@@ -92,6 +112,9 @@ bool VideoEncoderImpl::EncodeVideoFrame(
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!video_frame->visible_rect().IsEmpty());
+ DCHECK(!frame_encoded_callback.is_null());
+
cast_environment_->PostTask(CastEnvironment::VIDEO,
FROM_HERE,
base::Bind(&EncodeVideoFrameOnEncoderThread,
diff --git a/chromium/media/cast/sender/video_encoder_impl.h b/chromium/media/cast/sender/video_encoder_impl.h
index 58a6769e210..b31efa67b98 100644
--- a/chromium/media/cast/sender/video_encoder_impl.h
+++ b/chromium/media/cast/sender/video_encoder_impl.h
@@ -29,19 +29,23 @@ class VideoEncoderImpl : public VideoEncoder {
typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
FrameEncodedCallback;
+ // Returns true if VideoEncoderImpl can be used with the given |video_config|.
+ static bool IsSupported(const VideoSenderConfig& video_config);
+
VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config);
+ const VideoSenderConfig& video_config,
+ const StatusChangeCallback& status_change_cb);
- ~VideoEncoderImpl() override;
+ ~VideoEncoderImpl() final;
// VideoEncoder implementation.
bool EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time,
- const FrameEncodedCallback& frame_encoded_callback) override;
- void SetBitRate(int new_bit_rate) override;
- void GenerateKeyFrame() override;
- void LatestFrameIdToReference(uint32 frame_id) override;
+ const FrameEncodedCallback& frame_encoded_callback) final;
+ void SetBitRate(int new_bit_rate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
private:
scoped_refptr<CastEnvironment> cast_environment_;
diff --git a/chromium/media/cast/sender/video_encoder_impl_unittest.cc b/chromium/media/cast/sender/video_encoder_impl_unittest.cc
deleted file mode 100644
index d6f59497b4c..00000000000
--- a/chromium/media/cast/sender/video_encoder_impl_unittest.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/sender/video_encoder_impl.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/test/utility/default_config.h"
-#include "media/cast/test/utility/video_utility.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-namespace {
-class TestVideoEncoderCallback
- : public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
- public:
- explicit TestVideoEncoderCallback(bool multiple_buffer_mode)
- : multiple_buffer_mode_(multiple_buffer_mode),
- count_frames_delivered_(0) {}
-
- int count_frames_delivered() const {
- return count_frames_delivered_;
- }
-
- void SetExpectedResult(uint32 expected_frame_id,
- uint32 expected_last_referenced_frame_id,
- uint32 expected_rtp_timestamp,
- const base::TimeTicks& expected_reference_time) {
- expected_frame_id_ = expected_frame_id;
- expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
- expected_rtp_timestamp_ = expected_rtp_timestamp;
- expected_reference_time_ = expected_reference_time;
- }
-
- void DeliverEncodedVideoFrame(
- scoped_ptr<EncodedFrame> encoded_frame) {
- if (expected_frame_id_ != expected_last_referenced_frame_id_) {
- EXPECT_EQ(EncodedFrame::DEPENDENT, encoded_frame->dependency);
- } else if (!multiple_buffer_mode_) {
- EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
- }
- EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
- EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->referenced_frame_id)
- << "frame id: " << expected_frame_id_;
- EXPECT_EQ(expected_rtp_timestamp_, encoded_frame->rtp_timestamp);
- EXPECT_EQ(expected_reference_time_, encoded_frame->reference_time);
- EXPECT_FALSE(encoded_frame->data.empty());
- ++count_frames_delivered_;
- }
-
- private:
- friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
- virtual ~TestVideoEncoderCallback() {}
-
- const bool multiple_buffer_mode_;
- int count_frames_delivered_;
-
- uint32 expected_frame_id_;
- uint32 expected_last_referenced_frame_id_;
- uint32 expected_rtp_timestamp_;
- base::TimeTicks expected_reference_time_;
-
- DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
-};
-} // namespace
-
-class VideoEncoderImplTest : public ::testing::Test {
- protected:
- VideoEncoderImplTest() {
- video_config_ = GetDefaultVideoSenderConfig();
- video_config_.codec = CODEC_VIDEO_VP8;
- gfx::Size size(video_config_.width, video_config_.height);
- video_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- PopulateVideoFrame(video_frame_.get(), 123);
- }
-
- ~VideoEncoderImplTest() override {}
-
- void SetUp() override {
- testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
- task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
- cast_environment_ =
- new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_);
- }
-
- void TearDown() override {
- video_encoder_.reset();
- task_runner_->RunTasks();
- }
-
- void CreateEncoder() {
- test_video_encoder_callback_ = new TestVideoEncoderCallback(
- video_config_.max_number_of_video_buffers_used != 1);
- video_encoder_.reset(
- new VideoEncoderImpl(cast_environment_, video_config_));
- }
-
- void AdvanceClockAndVideoFrameTimestamp() {
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
- video_frame_->set_timestamp(
- video_frame_->timestamp() + base::TimeDelta::FromMilliseconds(33));
- }
-
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
- scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
- VideoSenderConfig video_config_;
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_ptr<VideoEncoder> video_encoder_;
- scoped_refptr<media::VideoFrame> video_frame_;
-
- scoped_refptr<CastEnvironment> cast_environment_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoEncoderImplTest);
-};
-
-TEST_F(VideoEncoderImplTest, GeneratesKeyFrameThenOnlyDeltaFrames) {
- CreateEncoder();
-
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- EXPECT_EQ(0, test_video_encoder_callback_->count_frames_delivered());
-
- test_video_encoder_callback_->SetExpectedResult(
- 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- for (uint32 frame_id = 1; frame_id < 10; ++frame_id) {
- AdvanceClockAndVideoFrameTimestamp();
- test_video_encoder_callback_->SetExpectedResult(
- frame_id,
- frame_id - 1,
- TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
- }
-
- EXPECT_EQ(10, test_video_encoder_callback_->count_frames_delivered());
-}
-
-TEST_F(VideoEncoderImplTest,
- FramesDoNotDependOnUnackedFramesInMultiBufferMode) {
- video_config_.max_number_of_video_buffers_used = 3;
- CreateEncoder();
-
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- EXPECT_EQ(0, test_video_encoder_callback_->count_frames_delivered());
-
- test_video_encoder_callback_->SetExpectedResult(
- 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- AdvanceClockAndVideoFrameTimestamp();
- video_encoder_->LatestFrameIdToReference(0);
- test_video_encoder_callback_->SetExpectedResult(
- 1, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- AdvanceClockAndVideoFrameTimestamp();
- video_encoder_->LatestFrameIdToReference(1);
- test_video_encoder_callback_->SetExpectedResult(
- 2, 1, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_->LatestFrameIdToReference(2);
-
- for (uint32 frame_id = 3; frame_id < 10; ++frame_id) {
- AdvanceClockAndVideoFrameTimestamp();
- test_video_encoder_callback_->SetExpectedResult(
- frame_id, 2,
- TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
- testing_clock_->NowTicks());
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
- task_runner_->RunTasks();
- }
-
- EXPECT_EQ(10, test_video_encoder_callback_->count_frames_delivered());
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/sender/video_encoder_unittest.cc b/chromium/media/cast/sender/video_encoder_unittest.cc
new file mode 100644
index 00000000000..9b4f697fc25
--- /dev/null
+++ b/chromium/media/cast/sender/video_encoder_unittest.cc
@@ -0,0 +1,457 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/sender/fake_video_encode_accelerator_factory.h"
+#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/sender/video_encoder.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "media/cast/sender/h264_vt_encoder.h"
+#endif
+
+namespace media {
+namespace cast {
+
+class VideoEncoderTest
+ : public ::testing::TestWithParam<std::pair<Codec, bool>> {
+ protected:
+ VideoEncoderTest()
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
+ video_config_(GetDefaultVideoSenderConfig()),
+ operational_status_(STATUS_UNINITIALIZED),
+ count_frames_delivered_(0) {
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ first_frame_time_ = testing_clock_->NowTicks();
+ }
+
+ ~VideoEncoderTest() override {}
+
+ void SetUp() final {
+ video_config_.codec = GetParam().first;
+ video_config_.use_external_encoder = GetParam().second;
+
+ if (video_config_.use_external_encoder)
+ vea_factory_.reset(new FakeVideoEncodeAcceleratorFactory(task_runner_));
+ }
+
+ void TearDown() final {
+ video_encoder_.reset();
+ RunTasksAndAdvanceClock();
+ }
+
+ void CreateEncoder(bool three_buffer_mode) {
+ ASSERT_EQ(STATUS_UNINITIALIZED, operational_status_);
+ video_config_.max_number_of_video_buffers_used =
+ (three_buffer_mode ? 3 : 1);
+ video_encoder_ = VideoEncoder::Create(
+ cast_environment_,
+ video_config_,
+ base::Bind(&VideoEncoderTest::OnOperationalStatusChange,
+ base::Unretained(this)),
+ base::Bind(
+ &FakeVideoEncodeAcceleratorFactory::CreateVideoEncodeAccelerator,
+ base::Unretained(vea_factory_.get())),
+ base::Bind(&FakeVideoEncodeAcceleratorFactory::CreateSharedMemory,
+ base::Unretained(vea_factory_.get()))).Pass();
+ RunTasksAndAdvanceClock();
+ if (is_encoder_present())
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+ }
+
+ bool is_encoder_present() const {
+ return !!video_encoder_;
+ }
+
+ bool is_testing_software_vp8_encoder() const {
+ return video_config_.codec == CODEC_VIDEO_VP8 &&
+ !video_config_.use_external_encoder;
+ }
+
+ bool is_testing_video_toolbox_encoder() const {
+ return
+#if defined(OS_MACOSX)
+ (!video_config_.use_external_encoder &&
+ H264VideoToolboxEncoder::IsSupported(video_config_)) ||
+#endif
+ false;
+ }
+
+ bool is_testing_platform_encoder() const {
+ return video_config_.use_external_encoder ||
+ is_testing_video_toolbox_encoder();
+ }
+
+ bool encoder_has_resize_delay() const {
+ return is_testing_platform_encoder() && !is_testing_video_toolbox_encoder();
+ }
+
+ VideoEncoder* video_encoder() const {
+ return video_encoder_.get();
+ }
+
+ void DestroyEncoder() {
+ video_encoder_.reset();
+ }
+
+ base::TimeTicks Now() const {
+ return testing_clock_->NowTicks();
+ }
+
+ void RunTasksAndAdvanceClock() const {
+ const base::TimeDelta frame_duration = base::TimeDelta::FromMicroseconds(
+ 1000000.0 / video_config_.max_frame_rate);
+#if defined(OS_MACOSX)
+ if (is_testing_video_toolbox_encoder()) {
+ // The H264VideoToolboxEncoder (on MAC_OSX and IOS) is not a faked
+ // implementation in these tests, and performs its encoding asynchronously
+ // on an unknown set of threads. Therefore, sleep the current thread for
+ // the real amount of time to avoid excessively spinning the CPU while
+ // waiting for something to happen.
+ base::PlatformThread::Sleep(frame_duration);
+ }
+#endif
+ task_runner_->RunTasks();
+ testing_clock_->Advance(frame_duration);
+ }
+
+ int count_frames_delivered() const {
+ return count_frames_delivered_;
+ }
+
+ void WaitForAllFramesToBeDelivered(int total_expected) const {
+ video_encoder_->EmitFrames();
+ while (count_frames_delivered_ < total_expected)
+ RunTasksAndAdvanceClock();
+ }
+
+ // Creates a new VideoFrame of the given |size|, filled with a test pattern.
+ // When available, it attempts to use the VideoFrameFactory provided by the
+ // encoder.
+ scoped_refptr<media::VideoFrame> CreateTestVideoFrame(const gfx::Size& size) {
+ const base::TimeDelta timestamp =
+ testing_clock_->NowTicks() - first_frame_time_;
+ scoped_refptr<media::VideoFrame> frame;
+ if (video_frame_factory_)
+ frame = video_frame_factory_->MaybeCreateFrame(size, timestamp);
+ if (!frame) {
+ frame = media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, timestamp);
+ }
+ PopulateVideoFrame(frame.get(), 123);
+ return frame;
+ }
+
+ // Requests encoding the |video_frame| and has the resulting frame delivered
+ // via a callback that checks for expected results. Returns false if the
+ // encoder rejected the request.
+ bool EncodeAndCheckDelivery(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ uint32 frame_id,
+ uint32 reference_frame_id) {
+ return video_encoder_->EncodeVideoFrame(
+ video_frame,
+ Now(),
+ base::Bind(&VideoEncoderTest::DeliverEncodedVideoFrame,
+ base::Unretained(this),
+ frame_id,
+ reference_frame_id,
+ TimeDeltaToRtpDelta(video_frame->timestamp(),
+ kVideoFrequency),
+ Now()));
+ }
+
+ // If the implementation of |video_encoder_| is ExternalVideoEncoder, check
+ // that the VEA factory has responded (by running the callbacks) a specific
+ // number of times. Otherwise, check that the VEA factory is inactive.
+ void ExpectVEAResponsesForExternalVideoEncoder(
+ int vea_response_count,
+ int shm_response_count) const {
+ if (!vea_factory_)
+ return;
+ EXPECT_EQ(vea_response_count, vea_factory_->vea_response_count());
+ EXPECT_EQ(shm_response_count, vea_factory_->shm_response_count());
+ }
+
+ void SetVEAFactoryAutoRespond(bool auto_respond) {
+ if (vea_factory_)
+ vea_factory_->SetAutoRespond(auto_respond);
+ }
+
+ private:
+ void OnOperationalStatusChange(OperationalStatus status) {
+ DVLOG(1) << "OnOperationalStatusChange: from " << operational_status_
+ << " to " << status;
+ operational_status_ = status;
+
+ EXPECT_TRUE(operational_status_ == STATUS_CODEC_REINIT_PENDING ||
+ operational_status_ == STATUS_INITIALIZED);
+
+ // Create the VideoFrameFactory the first time status changes to
+ // STATUS_INITIALIZED.
+ if (operational_status_ == STATUS_INITIALIZED && !video_frame_factory_)
+ video_frame_factory_ = video_encoder_->CreateVideoFrameFactory().Pass();
+ }
+
+ // Checks that |encoded_frame| matches expected values. This is the method
+ // bound in the callback returned from EncodeAndCheckDelivery().
+ void DeliverEncodedVideoFrame(
+ uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
+ uint32 expected_rtp_timestamp,
+ const base::TimeTicks& expected_reference_time,
+ scoped_ptr<EncodedFrame> encoded_frame) {
+ EXPECT_TRUE(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ EXPECT_EQ(expected_frame_id, encoded_frame->frame_id);
+ EXPECT_EQ(expected_rtp_timestamp, encoded_frame->rtp_timestamp);
+ EXPECT_EQ(expected_reference_time, encoded_frame->reference_time);
+
+ // The platform encoders are "black boxes" and may choose to vend key frames
+ // and/or empty data at any time. The software encoders, however, should
+ // strictly adhere to expected behavior.
+ if (is_testing_platform_encoder()) {
+ const bool expected_key_frame =
+ expected_frame_id == expected_last_referenced_frame_id;
+ const bool have_key_frame =
+ encoded_frame->dependency == EncodedFrame::KEY;
+ EXPECT_EQ(have_key_frame,
+ encoded_frame->frame_id == encoded_frame->referenced_frame_id);
+ LOG_IF(WARNING, expected_key_frame != have_key_frame)
+ << "Platform encoder chose to emit a "
+ << (have_key_frame ? "key" : "delta")
+ << " frame instead of the expected kind @ frame_id="
+ << encoded_frame->frame_id;
+ LOG_IF(WARNING, encoded_frame->data.empty())
+ << "Platform encoder returned an empty frame @ frame_id="
+ << encoded_frame->frame_id;
+ } else {
+ if (expected_frame_id != expected_last_referenced_frame_id) {
+ EXPECT_EQ(EncodedFrame::DEPENDENT, encoded_frame->dependency);
+ } else if (video_config_.max_number_of_video_buffers_used == 1) {
+ EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
+ }
+ EXPECT_EQ(expected_last_referenced_frame_id,
+ encoded_frame->referenced_frame_id);
+ EXPECT_FALSE(encoded_frame->data.empty());
+ }
+
+ ++count_frames_delivered_;
+ }
+
+ base::SimpleTestTickClock* const testing_clock_; // Owned by CastEnvironment.
+ const scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ VideoSenderConfig video_config_;
+ scoped_ptr<FakeVideoEncodeAcceleratorFactory> vea_factory_;
+ base::TimeTicks first_frame_time_;
+ OperationalStatus operational_status_;
+ scoped_ptr<VideoEncoder> video_encoder_;
+ scoped_ptr<VideoFrameFactory> video_frame_factory_;
+
+ int count_frames_delivered_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoEncoderTest);
+};
+
+// A simple test to encode ten frames of video, expecting to see one key frame
+// followed by nine delta frames.
+TEST_P(VideoEncoderTest, GeneratesKeyFrameThenOnlyDeltaFrames) {
+ CreateEncoder(false);
+ SetVEAFactoryAutoRespond(true);
+
+ EXPECT_EQ(0, count_frames_delivered());
+ ExpectVEAResponsesForExternalVideoEncoder(0, 0);
+
+ uint32 frame_id = 0;
+ uint32 reference_frame_id = 0;
+ const gfx::Size frame_size(1280, 720);
+
+ // Some encoders drop one or more frames initially while the encoder
+ // initializes. Then, for all encoders, expect one key frame is delivered.
+ bool accepted_first_frame = false;
+ do {
+ accepted_first_frame = EncodeAndCheckDelivery(
+ CreateTestVideoFrame(frame_size), frame_id, reference_frame_id);
+ if (!encoder_has_resize_delay())
+ EXPECT_TRUE(accepted_first_frame);
+ RunTasksAndAdvanceClock();
+ } while (!accepted_first_frame);
+ ExpectVEAResponsesForExternalVideoEncoder(1, 3);
+
+ // Expect the remaining frames are encoded as delta frames.
+ for (++frame_id; frame_id < 10; ++frame_id, ++reference_frame_id) {
+ EXPECT_TRUE(EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size),
+ frame_id,
+ reference_frame_id));
+ RunTasksAndAdvanceClock();
+ }
+
+ WaitForAllFramesToBeDelivered(10);
+ ExpectVEAResponsesForExternalVideoEncoder(1, 3);
+}
+
+// Tests basic frame dependency rules when using the VP8 encoder in multi-buffer
+// mode.
+TEST_P(VideoEncoderTest, FramesDoNotDependOnUnackedFramesInMultiBufferMode) {
+ if (!is_testing_software_vp8_encoder())
+ return; // Only test multibuffer mode for the software VP8 encoder.
+ CreateEncoder(true);
+
+ EXPECT_EQ(0, count_frames_delivered());
+
+ const gfx::Size frame_size(1280, 720);
+ EXPECT_TRUE(EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size), 0, 0));
+ RunTasksAndAdvanceClock();
+
+ video_encoder()->LatestFrameIdToReference(0);
+ EXPECT_TRUE(EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size), 1, 0));
+ RunTasksAndAdvanceClock();
+
+ video_encoder()->LatestFrameIdToReference(1);
+ EXPECT_TRUE(EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size), 2, 1));
+ RunTasksAndAdvanceClock();
+
+ video_encoder()->LatestFrameIdToReference(2);
+
+ for (uint32 frame_id = 3; frame_id < 10; ++frame_id) {
+ EXPECT_TRUE(EncodeAndCheckDelivery(
+ CreateTestVideoFrame(frame_size), frame_id, 2));
+ RunTasksAndAdvanceClock();
+ }
+
+ EXPECT_EQ(10, count_frames_delivered());
+}
+
+// Tests that the encoder continues to output EncodedFrames as the frame size
+// changes. See media/cast/receiver/video_decoder_unittest.cc for a complete
+// encode/decode cycle of varied frame sizes that actually checks the frame
+// content.
+TEST_P(VideoEncoderTest, EncodesVariedFrameSizes) {
+ CreateEncoder(false);
+ SetVEAFactoryAutoRespond(true);
+
+ EXPECT_EQ(0, count_frames_delivered());
+ ExpectVEAResponsesForExternalVideoEncoder(0, 0);
+
+ std::vector<gfx::Size> frame_sizes;
+ frame_sizes.push_back(gfx::Size(1280, 720));
+ frame_sizes.push_back(gfx::Size(640, 360)); // Shrink both dimensions.
+ frame_sizes.push_back(gfx::Size(300, 200)); // Shrink both dimensions again.
+ frame_sizes.push_back(gfx::Size(200, 300)); // Same area.
+ frame_sizes.push_back(gfx::Size(600, 400)); // Grow both dimensions.
+ frame_sizes.push_back(gfx::Size(638, 400)); // Shrink only one dimension.
+ frame_sizes.push_back(gfx::Size(638, 398)); // Shrink the other dimension.
+ frame_sizes.push_back(gfx::Size(320, 180)); // Shrink both dimensions again.
+ frame_sizes.push_back(gfx::Size(322, 180)); // Grow only one dimension.
+ frame_sizes.push_back(gfx::Size(322, 182)); // Grow the other dimension.
+ frame_sizes.push_back(gfx::Size(1920, 1080)); // Grow both dimensions again.
+
+ uint32 frame_id = 0;
+
+ // Encode one frame at each size. For encoders with a resize delay, expect no
+ // frames to be delivered since each frame size change will spur
+ // re-initialization of the underlying encoder. Otherwise expect all key
+ // frames to come out.
+ for (const auto& frame_size : frame_sizes) {
+ EXPECT_EQ(!encoder_has_resize_delay(),
+ EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size), frame_id,
+ frame_id));
+ RunTasksAndAdvanceClock();
+ if (!encoder_has_resize_delay())
+ ++frame_id;
+ }
+
+ // Encode 10+ frames at each size. For encoders with a resize delay, expect
+ // the first one or more frames are dropped while the encoder re-inits. Then,
+ // for all encoders, expect one key frame followed by all delta frames.
+ for (const auto& frame_size : frame_sizes) {
+ bool accepted_first_frame = false;
+ do {
+ accepted_first_frame = EncodeAndCheckDelivery(
+ CreateTestVideoFrame(frame_size), frame_id, frame_id);
+ if (!encoder_has_resize_delay())
+ EXPECT_TRUE(accepted_first_frame);
+ RunTasksAndAdvanceClock();
+ } while (!accepted_first_frame);
+ ++frame_id;
+ for (int i = 1; i < 10; ++i, ++frame_id) {
+ EXPECT_TRUE(EncodeAndCheckDelivery(CreateTestVideoFrame(frame_size),
+ frame_id,
+ frame_id - 1));
+ RunTasksAndAdvanceClock();
+ }
+ }
+
+ WaitForAllFramesToBeDelivered(10 * frame_sizes.size());
+ ExpectVEAResponsesForExternalVideoEncoder(
+ 2 * frame_sizes.size(), 6 * frame_sizes.size());
+}
+
+// Verify that everything goes well even if ExternalVideoEncoder is destroyed
+// before it has a chance to receive the VEA creation callback. For all other
+// encoders, this tests that the encoder can be safely destroyed before the task
+// is run that delivers the first EncodedFrame.
+TEST_P(VideoEncoderTest, CanBeDestroyedBeforeVEAIsCreated) {
+ CreateEncoder(false);
+
+ // Send a frame to spawn creation of the ExternalVideoEncoder instance.
+ EncodeAndCheckDelivery(CreateTestVideoFrame(gfx::Size(1280, 720)), 0, 0);
+
+ // Destroy the encoder, and confirm the VEA Factory did not respond yet.
+ DestroyEncoder();
+ ExpectVEAResponsesForExternalVideoEncoder(0, 0);
+
+ // Allow the VEA Factory to respond by running the creation callback. When
+ // the task runs, it will be a no-op since the weak pointers to the
+ // ExternalVideoEncoder were invalidated.
+ SetVEAFactoryAutoRespond(true);
+ RunTasksAndAdvanceClock();
+ ExpectVEAResponsesForExternalVideoEncoder(1, 0);
+}
+
+namespace {
+std::vector<std::pair<Codec, bool>> DetermineEncodersToTest() {
+ std::vector<std::pair<Codec, bool>> values;
+ // Fake encoder.
+ values.push_back(std::make_pair(CODEC_VIDEO_FAKE, false));
+ // Software VP8 encoder.
+ values.push_back(std::make_pair(CODEC_VIDEO_VP8, false));
+ // Hardware-accelerated encoder (faked).
+ values.push_back(std::make_pair(CODEC_VIDEO_VP8, true));
+#if defined(OS_MACOSX)
+ // VideoToolbox encoder (when VideoToolbox is present).
+ VideoSenderConfig video_config = GetDefaultVideoSenderConfig();
+ video_config.use_external_encoder = false;
+ video_config.codec = CODEC_VIDEO_H264;
+ if (H264VideoToolboxEncoder::IsSupported(video_config))
+ values.push_back(std::make_pair(CODEC_VIDEO_H264, false));
+#endif
+ return values;
+}
+} // namespace
+
+INSTANTIATE_TEST_CASE_P(
+ , VideoEncoderTest, ::testing::ValuesIn(DetermineEncodersToTest()));
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/video_frame_factory.h b/chromium/media/cast/sender/video_frame_factory.h
new file mode 100644
index 00000000000..f6b5889d3ea
--- /dev/null
+++ b/chromium/media/cast/sender/video_frame_factory.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
+#define MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+
+namespace gfx {
+class Size;
+}
+
+namespace media {
+
+class VideoFrame;
+
+namespace cast {
+
+// Interface for an object capable of vending video frames. There is no
+// requirement for a |VideoFrameFactory| to be concurrent but it must not be
+// pinned to a specific thread. Indeed, |VideoFrameFactory| implementations are
+// created by cast on the main cast thread then used by unknown client threads
+// via the |VideoFrameInput| interface.
+//
+// Clients are responsible for serializing access to a |VideoFrameFactory|.
+// Generally speaking, it is expected that a client will be using these objects
+// from a rendering thread or callback (which may execute on different threads
+// but never concurrently with itself).
+class VideoFrameFactory {
+ public:
+ virtual ~VideoFrameFactory() {}
+
+ // Creates a |VideoFrame| suitable for input via |InsertRawVideoFrame|. Frames
+  // obtained in this manner may provide benefits such as memory reuse and
+ // with the encoder. The format is guaranteed to be I420 or NV12.
+ //
+ // This can transiently return null if the encoder is not yet initialized or
+ // is re-initializing. Note however that if an encoder does support optimized
+ // frames, its |VideoFrameFactory| must eventually return frames. In practice,
+ // this means that |MaybeCreateFrame| must somehow signal the encoder to
+ // perform whatever initialization is needed to eventually produce frames.
+ virtual scoped_refptr<VideoFrame> MaybeCreateFrame(
+ const gfx::Size& frame_size, base::TimeDelta timestamp) = 0;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
diff --git a/chromium/media/cast/sender/video_sender.cc b/chromium/media/cast/sender/video_sender.cc
index 16b7159befe..c44b0f71317 100644
--- a/chromium/media/cast/sender/video_sender.cc
+++ b/chromium/media/cast/sender/video_sender.cc
@@ -8,13 +8,11 @@
#include <cstring>
#include "base/bind.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/trace_event/trace_event.h"
#include "media/cast/cast_defines.h"
#include "media/cast/net/cast_transport_config.h"
-#include "media/cast/sender/external_video_encoder.h"
-#include "media/cast/sender/video_encoder_impl.h"
+#include "media/cast/sender/video_encoder.h"
namespace media {
namespace cast {
@@ -32,6 +30,30 @@ const int kRoundTripsNeeded = 4;
// time).
const int kConstantTimeMs = 75;
+// Extract capture begin/end timestamps from |video_frame|'s metadata and log
+// it.
+void LogVideoCaptureTimestamps(const CastEnvironment& cast_environment,
+ const media::VideoFrame& video_frame,
+ RtpTimestamp rtp_timestamp) {
+ base::TimeTicks capture_begin_time;
+ base::TimeTicks capture_end_time;
+ if (!video_frame.metadata()->GetTimeTicks(
+ media::VideoFrameMetadata::CAPTURE_BEGIN_TIME, &capture_begin_time) ||
+ !video_frame.metadata()->GetTimeTicks(
+ media::VideoFrameMetadata::CAPTURE_END_TIME, &capture_end_time)) {
+ // The frame capture timestamps were not provided by the video capture
+ // source. Simply log the events as happening right now.
+ capture_begin_time = capture_end_time =
+ cast_environment.Clock()->NowTicks();
+ }
+ cast_environment.Logging()->InsertFrameEvent(
+ capture_begin_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT, rtp_timestamp,
+ kFrameIdUnknown);
+ cast_environment.Logging()->InsertFrameEvent(
+ capture_end_time, FRAME_CAPTURE_END, VIDEO_EVENT, rtp_timestamp,
+ kFrameIdUnknown);
+}
+
} // namespace
// Note, we use a fixed bitrate value when external video encoder is used.
@@ -41,58 +63,47 @@ const int kConstantTimeMs = 75;
VideoSender::VideoSender(
scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
- const CastInitializationCallback& initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
CastTransportSender* const transport_sender,
const PlayoutDelayChangeCB& playout_delay_change_cb)
: FrameSender(
- cast_environment,
- false,
- transport_sender,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- kVideoFrequency,
- video_config.ssrc,
- video_config.max_frame_rate,
- video_config.min_playout_delay,
- video_config.max_playout_delay,
- video_config.use_external_encoder ?
- NewFixedCongestionControl(
- (video_config.min_bitrate + video_config.max_bitrate) / 2) :
- NewAdaptiveCongestionControl(cast_environment->Clock(),
- video_config.max_bitrate,
- video_config.min_bitrate,
- video_config.max_frame_rate)),
+ cast_environment,
+ false,
+ transport_sender,
+ kVideoFrequency,
+ video_config.ssrc,
+ video_config.max_frame_rate,
+ video_config.min_playout_delay,
+ video_config.max_playout_delay,
+ video_config.use_external_encoder
+ ? NewFixedCongestionControl(
+ (video_config.min_bitrate + video_config.max_bitrate) / 2)
+ : NewAdaptiveCongestionControl(cast_environment->Clock(),
+ video_config.max_bitrate,
+ video_config.min_bitrate,
+ video_config.max_frame_rate)),
frames_in_encoder_(0),
last_bitrate_(0),
playout_delay_change_cb_(playout_delay_change_cb),
weak_factory_(this) {
- cast_initialization_status_ = STATUS_VIDEO_UNINITIALIZED;
-
- if (video_config.use_external_encoder) {
- video_encoder_.reset(new ExternalVideoEncoder(
- cast_environment,
- video_config,
- base::Bind(&VideoSender::OnEncoderInitialized,
- weak_factory_.GetWeakPtr(), initialization_cb),
- create_vea_cb,
- create_video_encode_mem_cb));
- } else {
- // Software encoder is initialized immediately.
- video_encoder_.reset(new VideoEncoderImpl(cast_environment, video_config));
- cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
- }
-
- if (cast_initialization_status_ == STATUS_VIDEO_INITIALIZED) {
- cast_environment->PostTask(
+ video_encoder_ = VideoEncoder::Create(
+ cast_environment_,
+ video_config,
+ status_change_cb,
+ create_vea_cb,
+ create_video_encode_mem_cb);
+ if (!video_encoder_) {
+ cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(initialization_cb, cast_initialization_status_));
+ base::Bind(status_change_cb, STATUS_UNSUPPORTED_CODEC));
}
media::cast::CastTransportRtpConfig transport_config;
transport_config.ssrc = video_config.ssrc;
- transport_config.feedback_ssrc = video_config.incoming_feedback_ssrc;
+ transport_config.feedback_ssrc = video_config.receiver_ssrc;
transport_config.rtp_payload_type = video_config.rtp_payload_type;
transport_config.aes_key = video_config.aes_key;
transport_config.aes_iv_mask = video_config.aes_iv_mask;
@@ -112,23 +123,15 @@ void VideoSender::InsertRawVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (cast_initialization_status_ != STATUS_VIDEO_INITIALIZED) {
+
+ if (!video_encoder_) {
NOTREACHED();
return;
}
- DCHECK(video_encoder_.get()) << "Invalid state";
const RtpTimestamp rtp_timestamp =
TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency);
- const base::TimeTicks insertion_time = cast_environment_->Clock()->NowTicks();
- // TODO(miu): Plumb in capture timestamps. For now, make it look like capture
- // took zero time by setting the BEGIN and END event to the same timestamp.
- cast_environment_->Logging()->InsertFrameEvent(
- insertion_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT, rtp_timestamp,
- kFrameIdUnknown);
- cast_environment_->Logging()->InsertFrameEvent(
- insertion_time, FRAME_CAPTURE_END, VIDEO_EVENT, rtp_timestamp,
- kFrameIdUnknown);
+ LogVideoCaptureTimestamps(*cast_environment_, *video_frame, rtp_timestamp);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT2(
@@ -168,6 +171,14 @@ void VideoSender::InsertRawVideoFrame(
VLOG(1) << "New target delay: " << new_target_delay.InMilliseconds();
playout_delay_change_cb_.Run(new_target_delay);
}
+
+ // Some encoder implementations have a frame window for analysis. Since we
+ // are dropping this frame, unless we instruct the encoder to flush all the
+ // frames that have been enqueued for encoding, frames_in_encoder_ and
+ // last_enqueued_frame_reference_time_ will never be updated and we will
+ // drop every subsequent frame for the rest of the session.
+ video_encoder_->EmitFrames();
+
return;
}
@@ -178,6 +189,11 @@ void VideoSender::InsertRawVideoFrame(
last_bitrate_ = bitrate;
}
+ if (video_frame->visible_rect().IsEmpty()) {
+ VLOG(1) << "Rejecting empty video frame.";
+ return;
+ }
+
if (video_encoder_->EncodeVideoFrame(
video_frame,
reference_time,
@@ -193,6 +209,10 @@ void VideoSender::InsertRawVideoFrame(
}
}
+scoped_ptr<VideoFrameFactory> VideoSender::CreateVideoFrameFactory() {
+ return video_encoder_ ? video_encoder_->CreateVideoFrameFactory() : nullptr;
+}
+
int VideoSender::GetNumberOfFramesInEncoder() const {
return frames_in_encoder_;
}
@@ -211,13 +231,6 @@ void VideoSender::OnAck(uint32 frame_id) {
video_encoder_->LatestFrameIdToReference(frame_id);
}
-void VideoSender::OnEncoderInitialized(
- const CastInitializationCallback& initialization_cb,
- CastInitializationStatus status) {
- cast_initialization_status_ = status;
- initialization_cb.Run(status);
-}
-
void VideoSender::OnEncodedVideoFrame(
int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame) {
diff --git a/chromium/media/cast/sender/video_sender.h b/chromium/media/cast/sender/video_sender.h
index 826099ecf4d..386ddb9c762 100644
--- a/chromium/media/cast/sender/video_sender.h
+++ b/chromium/media/cast/sender/video_sender.h
@@ -24,6 +24,7 @@ namespace cast {
class CastTransportSender;
class VideoEncoder;
+class VideoFrameFactory;
typedef base::Callback<void(base::TimeDelta)> PlayoutDelayChangeCB;
@@ -39,7 +40,7 @@ class VideoSender : public FrameSender,
public:
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
- const CastInitializationCallback& initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
CastTransportSender* const transport_sender,
@@ -50,23 +51,20 @@ class VideoSender : public FrameSender,
// Note: It is not guaranteed that |video_frame| will actually be encoded and
// sent, if VideoSender detects too many frames in flight. Therefore, clients
// should be careful about the rate at which this method is called.
- //
- // Note: It is invalid to call this method if InitializationResult() returns
- // anything but STATUS_VIDEO_INITIALIZED.
void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time);
+ // Creates a |VideoFrameFactory| object to vend |VideoFrame| object with
+ // encoder affinity (defined as offering some sort of performance benefit). If
+ // the encoder does not have any such capability, returns null.
+ scoped_ptr<VideoFrameFactory> CreateVideoFrameFactory();
+
protected:
- int GetNumberOfFramesInEncoder() const override;
- base::TimeDelta GetInFlightMediaDuration() const override;
- void OnAck(uint32 frame_id) override;
+ int GetNumberOfFramesInEncoder() const final;
+ base::TimeDelta GetInFlightMediaDuration() const final;
+ void OnAck(uint32 frame_id) final;
private:
- // Called when the encoder is initialized or has failed to initialize.
- void OnEncoderInitialized(
- const CastInitializationCallback& initialization_cb,
- CastInitializationStatus status);
-
// Called by the |video_encoder_| with the next EncodedFrame to send.
void OnEncodedVideoFrame(int encoder_bitrate,
scoped_ptr<EncodedFrame> encoded_frame);
diff --git a/chromium/media/cast/sender/video_sender_unittest.cc b/chromium/media/cast/sender/video_sender_unittest.cc
index 120d8c7678b..1dbccf0ddc0 100644
--- a/chromium/media/cast/sender/video_sender_unittest.cc
+++ b/chromium/media/cast/sender/video_sender_unittest.cc
@@ -15,11 +15,13 @@
#include "media/cast/net/cast_transport_config.h"
#include "media/cast/net/cast_transport_sender_impl.h"
#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/sender/fake_video_encode_accelerator_factory.h"
+#include "media/cast/sender/video_frame_factory.h"
#include "media/cast/sender/video_sender.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/test/fake_video_encode_accelerator.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
+#include "media/video/fake_video_encode_accelerator.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -34,25 +36,11 @@ static const int kHeight = 240;
using testing::_;
using testing::AtLeast;
-void CreateVideoEncodeAccelerator(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- scoped_ptr<VideoEncodeAccelerator> fake_vea,
- const ReceiveVideoEncodeAcceleratorCallback& callback) {
- callback.Run(task_runner, fake_vea.Pass());
-}
-void CreateSharedMemory(
- size_t size, const ReceiveVideoEncodeMemoryCallback& callback) {
- scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
- if (!shm->CreateAndMapAnonymous(size)) {
- NOTREACHED();
- return;
- }
- callback.Run(shm.Pass());
-}
-
-void SaveInitializationStatus(CastInitializationStatus* out_status,
- CastInitializationStatus in_status) {
+void SaveOperationalStatus(OperationalStatus* out_status,
+ OperationalStatus in_status) {
+ DVLOG(1) << "OperationalStatus transitioning from " << *out_status << " to "
+ << in_status;
*out_status = in_status;
}
@@ -64,7 +52,7 @@ class TestPacketSender : public PacketSender {
paused_(false) {}
// A singular packet implies a RTCP packet.
- bool SendPacket(PacketRef packet, const base::Closure& cb) override {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) final {
if (paused_) {
stored_packet_ = packet;
callback_ = cb;
@@ -84,7 +72,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- int64 GetBytesSent() override { return 0; }
+ int64 GetBytesSent() final { return 0; }
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
@@ -110,18 +98,19 @@ class TestPacketSender : public PacketSender {
void IgnorePlayoutDelayChanges(base::TimeDelta unused_playout_delay) {
}
+
class PeerVideoSender : public VideoSender {
public:
PeerVideoSender(
scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
- const CastInitializationCallback& initialization_cb,
+ const StatusChangeCallback& status_change_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
CastTransportSender* const transport_sender)
: VideoSender(cast_environment,
video_config,
- initialization_cb,
+ status_change_cb,
create_vea_cb,
create_video_encode_mem_cb,
transport_sender,
@@ -132,32 +121,37 @@ class PeerVideoSender : public VideoSender {
class VideoSenderTest : public ::testing::Test {
protected:
- VideoSenderTest() {
- testing_clock_ = new base::SimpleTestTickClock();
+ VideoSenderTest()
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
+ operational_status_(STATUS_UNINITIALIZED),
+ vea_factory_(task_runner_) {
testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
- task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
- cast_environment_ =
- new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- task_runner_,
- task_runner_);
+ vea_factory_.SetAutoRespond(true);
last_pixel_value_ = kPixelValue;
net::IPEndPoint dummy_endpoint;
transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_,
dummy_endpoint,
+ dummy_endpoint,
make_scoped_ptr(new base::DictionaryValue),
base::Bind(&UpdateCastTransportStatus),
BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
+ PacketReceiverCallback(),
&transport_));
}
~VideoSenderTest() override {}
- void TearDown() override {
+ void TearDown() final {
video_sender_.reset();
task_runner_->RunTasks();
}
@@ -168,53 +162,34 @@ class VideoSenderTest : public ::testing::Test {
// If |external| is true then external video encoder (VEA) is used.
// |expect_init_sucess| is true if initialization is expected to succeed.
- CastInitializationStatus InitEncoder(bool external,
- bool expect_init_success) {
- VideoSenderConfig video_config;
- video_config.ssrc = 1;
- video_config.incoming_feedback_ssrc = 2;
- video_config.rtp_payload_type = 127;
+ void InitEncoder(bool external, bool expect_init_success) {
+ VideoSenderConfig video_config = GetDefaultVideoSenderConfig();
video_config.use_external_encoder = external;
- video_config.width = kWidth;
- video_config.height = kHeight;
- video_config.max_bitrate = 5000000;
- video_config.min_bitrate = 1000000;
- video_config.start_bitrate = 1000000;
- video_config.max_qp = 56;
- video_config.min_qp = 0;
- video_config.max_frame_rate = 30;
- video_config.max_number_of_video_buffers_used = 1;
- video_config.codec = CODEC_VIDEO_VP8;
- CastInitializationStatus status = STATUS_VIDEO_UNINITIALIZED;
+
+ ASSERT_EQ(operational_status_, STATUS_UNINITIALIZED);
if (external) {
- test::FakeVideoEncodeAccelerator* fake_vea =
- new test::FakeVideoEncodeAccelerator(
- task_runner_, &stored_bitrates_);
- fake_vea->SetWillInitializationSucceed(expect_init_success);
- scoped_ptr<VideoEncodeAccelerator> fake_vea_owner(fake_vea);
- video_sender_.reset(
- new PeerVideoSender(cast_environment_,
- video_config,
- base::Bind(&SaveInitializationStatus,
- &status),
- base::Bind(&CreateVideoEncodeAccelerator,
- task_runner_,
- base::Passed(&fake_vea_owner)),
- base::Bind(&CreateSharedMemory),
- transport_sender_.get()));
+ vea_factory_.SetInitializationWillSucceed(expect_init_success);
+ video_sender_.reset(new PeerVideoSender(
+ cast_environment_,
+ video_config,
+ base::Bind(&SaveOperationalStatus, &operational_status_),
+ base::Bind(
+ &FakeVideoEncodeAcceleratorFactory::CreateVideoEncodeAccelerator,
+ base::Unretained(&vea_factory_)),
+ base::Bind(&FakeVideoEncodeAcceleratorFactory::CreateSharedMemory,
+ base::Unretained(&vea_factory_)),
+ transport_sender_.get()));
} else {
- video_sender_.reset(
- new PeerVideoSender(cast_environment_,
- video_config,
- base::Bind(&SaveInitializationStatus,
- &status),
- CreateDefaultVideoEncodeAcceleratorCallback(),
- CreateDefaultVideoEncodeMemoryCallback(),
- transport_sender_.get()));
+ video_sender_.reset(new PeerVideoSender(
+ cast_environment_,
+ video_config,
+ base::Bind(&SaveOperationalStatus, &operational_status_),
+ CreateDefaultVideoEncodeAcceleratorCallback(),
+ CreateDefaultVideoEncodeMemoryCallback(),
+ transport_sender_.get()));
}
task_runner_->RunTasks();
- return status;
}
scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
@@ -245,13 +220,14 @@ class VideoSenderTest : public ::testing::Test {
task_runner_->Sleep(base::TimeDelta::FromMilliseconds(during_ms));
}
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ base::SimpleTestTickClock* const testing_clock_; // Owned by CastEnvironment.
+ const scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ OperationalStatus operational_status_;
+ FakeVideoEncodeAcceleratorFactory vea_factory_;
TestPacketSender transport_;
scoped_ptr<CastTransportSenderImpl> transport_sender_;
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<PeerVideoSender> video_sender_;
- std::vector<uint32> stored_bitrates_;
- scoped_refptr<CastEnvironment> cast_environment_;
int last_pixel_value_;
base::TimeTicks first_frame_timestamp_;
@@ -259,7 +235,9 @@ class VideoSenderTest : public ::testing::Test {
};
TEST_F(VideoSenderTest, BuiltInEncoder) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
const base::TimeTicks reference_time = testing_clock_->NowTicks();
@@ -271,34 +249,65 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
}
TEST_F(VideoSenderTest, ExternalEncoder) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(true, true));
+ InitEncoder(true, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
+  // The SizeAdaptableExternalVideoEncoder initially reports STATUS_INITIALIZED
+ // so that frames will be sent to it. Therefore, no encoder activity should
+ // have occurred at this point. Send a frame to spurn creation of the
+ // underlying ExternalVideoEncoder instance.
+ if (vea_factory_.vea_response_count() == 0) {
+ video_sender_->InsertRawVideoFrame(GetNewVideoFrame(),
+ testing_clock_->NowTicks());
+ task_runner_->RunTasks();
+ }
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+ RunTasks(33);
+
+ // VideoSender created an encoder for 1280x720 frames, in order to provide the
+ // INITIALIZED status.
+ EXPECT_EQ(1, vea_factory_.vea_response_count());
+ EXPECT_EQ(3, vea_factory_.shm_response_count());
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks reference_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, reference_time);
- task_runner_->RunTasks();
- video_sender_->InsertRawVideoFrame(video_frame, reference_time);
- task_runner_->RunTasks();
- video_sender_->InsertRawVideoFrame(video_frame, reference_time);
- task_runner_->RunTasks();
+ for (int i = 0; i < 3; ++i) {
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
+ RunTasks(33);
+ // VideoSender re-created the encoder for the 320x240 frames we're
+ // providing.
+ EXPECT_EQ(1, vea_factory_.vea_response_count());
+ EXPECT_EQ(3, vea_factory_.shm_response_count());
+ }
- // Fixed bitrate is used for external encoder. Bitrate is only once
- // to the encoder.
- EXPECT_EQ(1u, stored_bitrates_.size());
video_sender_.reset(NULL);
task_runner_->RunTasks();
+ EXPECT_EQ(1, vea_factory_.vea_response_count());
+ EXPECT_EQ(3, vea_factory_.shm_response_count());
}
TEST_F(VideoSenderTest, ExternalEncoderInitFails) {
- EXPECT_EQ(STATUS_HW_VIDEO_ENCODER_NOT_SUPPORTED,
- InitEncoder(true, false));
+ InitEncoder(true, false);
+
+  // The SizeAdaptableExternalVideoEncoder initially reports STATUS_INITIALIZED
+ // so that frames will be sent to it. Send a frame to spurn creation of the
+ // underlying ExternalVideoEncoder instance, which should result in failure.
+ if (operational_status_ == STATUS_INITIALIZED ||
+ operational_status_ == STATUS_CODEC_REINIT_PENDING) {
+ video_sender_->InsertRawVideoFrame(GetNewVideoFrame(),
+ testing_clock_->NowTicks());
+ task_runner_->RunTasks();
+ }
+ EXPECT_EQ(STATUS_CODEC_INIT_FAILED, operational_status_);
+
video_sender_.reset(NULL);
task_runner_->RunTasks();
}
TEST_F(VideoSenderTest, RtcpTimer) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
@@ -322,7 +331,8 @@ TEST_F(VideoSenderTest, RtcpTimer) {
}
TEST_F(VideoSenderTest, ResendTimer) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
@@ -350,7 +360,9 @@ TEST_F(VideoSenderTest, ResendTimer) {
}
TEST_F(VideoSenderTest, LogAckReceivedEvent) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
SimpleEventSubscriber event_subscriber;
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
@@ -382,7 +394,9 @@ TEST_F(VideoSenderTest, LogAckReceivedEvent) {
}
TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
// Send a stream of frames and don't ACK; by default we shouldn't have more
// than 4 frames in flight.
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
@@ -428,7 +442,9 @@ TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
}
TEST_F(VideoSenderTest, DuplicateAckRetransmit) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
@@ -468,7 +484,9 @@ TEST_F(VideoSenderTest, DuplicateAckRetransmit) {
}
TEST_F(VideoSenderTest, DuplicateAckRetransmitDoesNotCancelRetransmits) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
@@ -519,7 +537,9 @@ TEST_F(VideoSenderTest, DuplicateAckRetransmitDoesNotCancelRetransmits) {
}
TEST_F(VideoSenderTest, AcksCancelRetransmits) {
- EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
transport_.SetPause(true);
scoped_refptr<media::VideoFrame> video_frame = GetLargeNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
@@ -536,5 +556,12 @@ TEST_F(VideoSenderTest, AcksCancelRetransmits) {
EXPECT_EQ(0, transport_.number_of_rtp_packets());
}
+TEST_F(VideoSenderTest, CheckVideoFrameFactoryIsNull) {
+ InitEncoder(false, true);
+ ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
+
+ EXPECT_EQ(nullptr, video_sender_->CreateVideoFrameFactory().get());
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/vp8_encoder.cc b/chromium/media/cast/sender/vp8_encoder.cc
index bf430c1869c..4d397b65dec 100644
--- a/chromium/media/cast/sender/vp8_encoder.cc
+++ b/chromium/media/cast/sender/vp8_encoder.cc
@@ -28,13 +28,18 @@ Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config)
use_multiple_video_buffers_(
cast_config_.max_number_of_video_buffers_used ==
kNumberOfVp8VideoBuffers),
- raw_image_(nullptr),
key_frame_requested_(true),
+ bitrate_kbit_(cast_config_.start_bitrate / 1000),
last_encoded_frame_id_(kStartFrameId),
last_acked_frame_id_(kStartFrameId),
undroppable_frames_(0) {
config_.g_timebase.den = 0; // Not initialized.
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ buffer_state_[i].frame_id = last_encoded_frame_id_;
+ buffer_state_[i].state = kBufferStartState;
+ }
+
// VP8 have 3 buffers available for prediction, with
// max_number_of_video_buffers_used set to 1 we maximize the coding efficiency
// however in this mode we can not skip frames in the receiver to catch up
@@ -53,34 +58,58 @@ Vp8Encoder::~Vp8Encoder() {
DCHECK(thread_checker_.CalledOnValidThread());
if (is_initialized())
vpx_codec_destroy(&encoder_);
- vpx_img_free(raw_image_);
}
void Vp8Encoder::Initialize() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!is_initialized());
+ // The encoder will be created/configured when the first frame encode is
+ // requested.
+}
- // Creating a wrapper to the image - setting image data to NULL. Actual
- // pointer will be set during encode. Setting align to 1, as it is
- // meaningless (actual memory is not allocated).
- raw_image_ = vpx_img_wrap(
- NULL, VPX_IMG_FMT_I420, cast_config_.width, cast_config_.height, 1, NULL);
+void Vp8Encoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) {
+ if (is_initialized()) {
+ // Workaround for VP8 bug: If the new size is strictly less-than-or-equal to
+ // the old size, in terms of area, the existing encoder instance can
+ // continue. Otherwise, completely tear-down and re-create a new encoder to
+ // avoid a shutdown crash.
+ if (frame_size.GetArea() <= gfx::Size(config_.g_w, config_.g_h).GetArea() &&
+ !use_multiple_video_buffers_) {
+ DVLOG(1) << "Continuing to use existing encoder at smaller frame size: "
+ << gfx::Size(config_.g_w, config_.g_h).ToString() << " --> "
+ << frame_size.ToString();
+ config_.g_w = frame_size.width();
+ config_.g_h = frame_size.height();
+ if (vpx_codec_enc_config_set(&encoder_, &config_) == VPX_CODEC_OK)
+ return;
+ DVLOG(1) << "libvpx rejected the attempt to use a smaller frame size in "
+ "the current instance.";
+ }
+
+ DVLOG(1) << "Destroying/Re-Creating encoder for larger frame size: "
+ << gfx::Size(config_.g_w, config_.g_h).ToString() << " --> "
+ << frame_size.ToString();
+ vpx_codec_destroy(&encoder_);
+ } else {
+ DVLOG(1) << "Creating encoder for the first frame; size: "
+ << frame_size.ToString();
+ }
+ // Reset multi-buffer mode state.
+ last_acked_frame_id_ = last_encoded_frame_id_;
+ undroppable_frames_ = 0;
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
- buffer_state_[i].frame_id = kStartFrameId;
+ buffer_state_[i].frame_id = last_encoded_frame_id_;
buffer_state_[i].state = kBufferStartState;
}
// Populate encoder configuration with default values.
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &config_, 0)) {
- NOTREACHED() << "Invalid return value";
- config_.g_timebase.den = 0; // Do not call vpx_codec_destroy() in dtor.
- return;
- }
+ CHECK_EQ(vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &config_, 0),
+ VPX_CODEC_OK);
config_.g_threads = cast_config_.number_of_encode_threads;
- config_.g_w = cast_config_.width;
- config_.g_h = cast_config_.height;
+ config_.g_w = frame_size.width();
+ config_.g_h = frame_size.height();
// Set the timebase to match that of base::TimeDelta.
config_.g_timebase.num = 1;
config_.g_timebase.den = base::Time::kMicrosecondsPerSecond;
@@ -89,6 +118,8 @@ void Vp8Encoder::Initialize() {
// codec requirements.
config_.g_error_resilient = 1;
}
+ // |g_pass| and |g_lag_in_frames| must be "one pass" and zero, respectively,
+ // in order for VP8 to support changing frame sizes during encoding:
config_.g_pass = VPX_RC_ONE_PASS;
config_.g_lag_in_frames = 0; // Immediate data output for each frame.
@@ -96,7 +127,7 @@ void Vp8Encoder::Initialize() {
config_.rc_dropframe_thresh = 0; // The encoder may not drop any frames.
config_.rc_resize_allowed = 0; // TODO(miu): Why not? Investigate this.
config_.rc_end_usage = VPX_CBR;
- config_.rc_target_bitrate = cast_config_.start_bitrate / 1000; // In kbit/s.
+ config_.rc_target_bitrate = bitrate_kbit_;
config_.rc_min_quantizer = cast_config_.min_qp;
config_.rc_max_quantizer = cast_config_.max_qp;
// TODO(miu): Revisit these now that the encoder is being successfully
@@ -113,24 +144,22 @@ void Vp8Encoder::Initialize() {
config_.kf_mode = VPX_KF_DISABLED;
vpx_codec_flags_t flags = 0;
- if (vpx_codec_enc_init(&encoder_, vpx_codec_vp8_cx(), &config_, flags)) {
- NOTREACHED() << "vpx_codec_enc_init() failed.";
- config_.g_timebase.den = 0; // Do not call vpx_codec_destroy() in dtor.
- return;
- }
+ CHECK_EQ(vpx_codec_enc_init(&encoder_, vpx_codec_vp8_cx(), &config_, flags),
+ VPX_CODEC_OK);
// Raise the threshold for considering macroblocks as static. The default is
// zero, so this setting makes the encoder less sensitive to motion. This
// lowers the probability of needing to utilize more CPU to search for motion
// vectors.
- vpx_codec_control(&encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+ CHECK_EQ(vpx_codec_control(&encoder_, VP8E_SET_STATIC_THRESHOLD, 1),
+ VPX_CODEC_OK);
// Improve quality by enabling sets of codec features that utilize more CPU.
// The default is zero, with increasingly more CPU to be used as the value is
// more negative.
// TODO(miu): Document why this value was chosen and expected behaviors.
// Should this be dynamic w.r.t. hardware performance?
- vpx_codec_control(&encoder_, VP8E_SET_CPUUSED, -6);
+ CHECK_EQ(vpx_codec_control(&encoder_, VP8E_SET_CPUUSED, -6), VPX_CODEC_OK);
}
void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
@@ -139,20 +168,11 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(encoded_frame);
- CHECK(is_initialized()); // No illegal reference to |config_| or |encoder_|.
-
- // Image in vpx_image_t format.
- // Input image is const. VP8's raw image is not defined as const.
- raw_image_->planes[VPX_PLANE_Y] =
- const_cast<uint8*>(video_frame->data(VideoFrame::kYPlane));
- raw_image_->planes[VPX_PLANE_U] =
- const_cast<uint8*>(video_frame->data(VideoFrame::kUPlane));
- raw_image_->planes[VPX_PLANE_V] =
- const_cast<uint8*>(video_frame->data(VideoFrame::kVPlane));
-
- raw_image_->stride[VPX_PLANE_Y] = video_frame->stride(VideoFrame::kYPlane);
- raw_image_->stride[VPX_PLANE_U] = video_frame->stride(VideoFrame::kUPlane);
- raw_image_->stride[VPX_PLANE_V] = video_frame->stride(VideoFrame::kVPlane);
+ // Initialize on-demand. Later, if the video frame size has changed, update
+ // the encoder configuration.
+ const gfx::Size frame_size = video_frame->visible_rect().size();
+ if (!is_initialized() || gfx::Size(config_.g_w, config_.g_h) != frame_size)
+ ConfigureForNewFrameSize(frame_size);
uint32 latest_frame_id_to_reference;
Vp8Buffers buffer_to_update;
@@ -171,6 +191,27 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
GetCodecUpdateFlags(buffer_to_update, &flags);
}
+ // Wrapper for vpx_codec_encode() to access the YUV data in the |video_frame|.
+ // Only the VISIBLE rectangle within |video_frame| is exposed to the codec.
+ vpx_image_t vpx_image;
+ vpx_image_t* const result = vpx_img_wrap(
+ &vpx_image,
+ VPX_IMG_FMT_I420,
+ frame_size.width(),
+ frame_size.height(),
+ 1,
+ video_frame->data(VideoFrame::kYPlane));
+ DCHECK_EQ(result, &vpx_image);
+ vpx_image.planes[VPX_PLANE_Y] =
+ video_frame->visible_data(VideoFrame::kYPlane);
+ vpx_image.planes[VPX_PLANE_U] =
+ video_frame->visible_data(VideoFrame::kUPlane);
+ vpx_image.planes[VPX_PLANE_V] =
+ video_frame->visible_data(VideoFrame::kVPlane);
+ vpx_image.stride[VPX_PLANE_Y] = video_frame->stride(VideoFrame::kYPlane);
+ vpx_image.stride[VPX_PLANE_U] = video_frame->stride(VideoFrame::kUPlane);
+ vpx_image.stride[VPX_PLANE_V] = video_frame->stride(VideoFrame::kVPlane);
+
// The frame duration given to the VP8 codec affects a number of important
// behaviors, including: per-frame bandwidth, CPU time spent encoding,
// temporal quality trade-offs, and key/golden/alt-ref frame generation
@@ -195,7 +236,7 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// entirely on |predicted_frame_duration| and the target bitrate setting being
// micro-managed via calls to UpdateRates().
CHECK_EQ(vpx_codec_encode(&encoder_,
- raw_image_,
+ &vpx_image,
0,
predicted_frame_duration.InMicroseconds(),
flags,
@@ -402,7 +443,7 @@ void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
if (config_.rc_target_bitrate == new_bitrate_kbit)
return;
- config_.rc_target_bitrate = new_bitrate_kbit;
+ config_.rc_target_bitrate = bitrate_kbit_ = new_bitrate_kbit;
// Update encoder context.
if (vpx_codec_enc_config_set(&encoder_, &config_)) {
diff --git a/chromium/media/cast/sender/vp8_encoder.h b/chromium/media/cast/sender/vp8_encoder.h
index 387dbf27a4b..705534c9483 100644
--- a/chromium/media/cast/sender/vp8_encoder.h
+++ b/chromium/media/cast/sender/vp8_encoder.h
@@ -11,6 +11,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/sender/software_video_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
class VideoFrame;
@@ -23,16 +24,16 @@ class Vp8Encoder : public SoftwareVideoEncoder {
public:
explicit Vp8Encoder(const VideoSenderConfig& video_config);
- ~Vp8Encoder() override;
+ ~Vp8Encoder() final;
// SoftwareVideoEncoder implementations.
- void Initialize() override;
+ void Initialize() final;
void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time,
- EncodedFrame* encoded_frame) override;
- void UpdateRates(uint32 new_bitrate) override;
- void GenerateKeyFrame() override;
- void LatestFrameIdToReference(uint32 frame_id) override;
+ EncodedFrame* encoded_frame) final;
+ void UpdateRates(uint32 new_bitrate) final;
+ void GenerateKeyFrame() final;
+ void LatestFrameIdToReference(uint32 frame_id) final;
private:
enum { kNumberOfVp8VideoBuffers = 3 };
@@ -56,11 +57,17 @@ class Vp8Encoder : public SoftwareVideoEncoder {
};
bool is_initialized() const {
- // Initialize() sets the timebase denominator value to non-zero if the
- // encoder is successfully initialized, and it is zero otherwise.
+ // ConfigureForNewFrameSize() sets the timebase denominator value to
+ // non-zero if the encoder is successfully initialized, and it is zero
+ // otherwise.
return config_.g_timebase.den != 0;
}
+ // If the |encoder_| is live, attempt reconfiguration to allow it to encode
+ // frames at a new |frame_size|. Otherwise, tear it down and re-create a new
+ // |encoder_| instance.
+ void ConfigureForNewFrameSize(const gfx::Size& frame_size);
+
// Calculate which next Vp8 buffers to update with the next frame.
Vp8Buffers GetNextBufferToUpdate();
@@ -80,12 +87,13 @@ class Vp8Encoder : public SoftwareVideoEncoder {
vpx_codec_enc_cfg_t config_;
vpx_codec_ctx_t encoder_;
- // Wrapper for access to YUV data planes in a media::VideoFrame.
- vpx_image_t* raw_image_;
-
// Set to true to request the next frame emitted by Vp8Encoder be a key frame.
bool key_frame_requested_;
+ // Saves the current bitrate setting, for when the |encoder_| is reconfigured
+ // for different frame sizes.
+ int bitrate_kbit_;
+
// The |VideoFrame::timestamp()| of the last encoded frame. This is used to
// predict the duration of the next frame.
base::TimeDelta last_frame_timestamp_;
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index 416ba573074..77783e3319c 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -13,11 +13,13 @@
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/cdm_key_information.h"
#include "media/base/cdm_promise.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
+#include "media/cdm/cenc_utils.h"
#include "media/cdm/json_web_key.h"
namespace media {
@@ -38,11 +40,11 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
// Replaces value if |session_id| is already present, or adds it if not.
// This |decryption_key| becomes the latest until another insertion or
// |session_id| is erased.
- void Insert(const std::string& web_session_id,
+ void Insert(const std::string& session_id,
scoped_ptr<DecryptionKey> decryption_key);
// Deletes the entry for |session_id| if present.
- void Erase(const std::string& web_session_id);
+ void Erase(const std::string& session_id);
// Returns whether the list is empty
bool Empty() const { return key_list_.empty(); }
@@ -53,13 +55,13 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
return key_list_.begin()->second;
}
- bool Contains(const std::string& web_session_id) {
- return Find(web_session_id) != key_list_.end();
+ bool Contains(const std::string& session_id) {
+ return Find(session_id) != key_list_.end();
}
private:
- // Searches the list for an element with |web_session_id|.
- KeyList::iterator Find(const std::string& web_session_id);
+ // Searches the list for an element with |session_id|.
+ KeyList::iterator Find(const std::string& session_id);
// Deletes the entry pointed to by |position|.
void Erase(KeyList::iterator position);
@@ -70,28 +72,27 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
};
void AesDecryptor::SessionIdDecryptionKeyMap::Insert(
- const std::string& web_session_id,
+ const std::string& session_id,
scoped_ptr<DecryptionKey> decryption_key) {
- KeyList::iterator it = Find(web_session_id);
+ KeyList::iterator it = Find(session_id);
if (it != key_list_.end())
Erase(it);
DecryptionKey* raw_ptr = decryption_key.release();
- key_list_.push_front(std::make_pair(web_session_id, raw_ptr));
+ key_list_.push_front(std::make_pair(session_id, raw_ptr));
}
void AesDecryptor::SessionIdDecryptionKeyMap::Erase(
- const std::string& web_session_id) {
- KeyList::iterator it = Find(web_session_id);
+ const std::string& session_id) {
+ KeyList::iterator it = Find(session_id);
if (it == key_list_.end())
return;
Erase(it);
}
AesDecryptor::SessionIdDecryptionKeyMap::KeyList::iterator
-AesDecryptor::SessionIdDecryptionKeyMap::Find(
- const std::string& web_session_id) {
+AesDecryptor::SessionIdDecryptionKeyMap::Find(const std::string& session_id) {
for (KeyList::iterator it = key_list_.begin(); it != key_list_.end(); ++it) {
- if (it->first == web_session_id)
+ if (it->first == session_id)
return it;
}
return key_list_.end();
@@ -104,7 +105,7 @@ void AesDecryptor::SessionIdDecryptionKeyMap::Erase(
key_list_.erase(position);
}
-uint32 AesDecryptor::next_web_session_id_ = 1;
+uint32_t AesDecryptor::next_session_id_ = 1;
enum ClearBytesBufferSel {
kSrcContainsClearBytes,
@@ -113,8 +114,8 @@ enum ClearBytesBufferSel {
static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
const ClearBytesBufferSel sel,
- const uint8* src,
- uint8* dst) {
+ const uint8_t* src,
+ uint8_t* dst) {
for (size_t i = 0; i < subsamples.size(); i++) {
const SubsampleEntry& subsample = subsamples[i];
if (sel == kSrcContainsClearBytes) {
@@ -166,7 +167,7 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
// TODO(xhwang): Find a way to avoid this data copy.
return DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(decrypted_text.data()),
+ reinterpret_cast<const uint8_t*>(decrypted_text.data()),
decrypted_text.size());
}
@@ -191,7 +192,7 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
// No need to decrypt if there is no encrypted data.
if (total_encrypted_size <= 0) {
- return DecoderBuffer::CopyFrom(reinterpret_cast<const uint8*>(sample),
+ return DecoderBuffer::CopyFrom(reinterpret_cast<const uint8_t*>(sample),
sample_size);
}
@@ -201,9 +202,10 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
// copy all encrypted subsamples to a contiguous buffer, decrypt them, then
// copy the decrypted bytes over the encrypted bytes in the output.
// TODO(strobe): attempt to reduce number of memory copies
- scoped_ptr<uint8[]> encrypted_bytes(new uint8[total_encrypted_size]);
+ scoped_ptr<uint8_t[]> encrypted_bytes(new uint8_t[total_encrypted_size]);
CopySubsamples(subsamples, kSrcContainsClearBytes,
- reinterpret_cast<const uint8*>(sample), encrypted_bytes.get());
+ reinterpret_cast<const uint8_t*>(sample),
+ encrypted_bytes.get());
base::StringPiece encrypted_text(
reinterpret_cast<const char*>(encrypted_bytes.get()),
@@ -216,19 +218,22 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
DCHECK_EQ(decrypted_text.size(), encrypted_text.size());
scoped_refptr<DecoderBuffer> output = DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(sample), sample_size);
+ reinterpret_cast<const uint8_t*>(sample), sample_size);
CopySubsamples(subsamples, kDstContainsClearBytes,
- reinterpret_cast<const uint8*>(decrypted_text.data()),
+ reinterpret_cast<const uint8_t*>(decrypted_text.data()),
output->writable_data());
return output;
}
-AesDecryptor::AesDecryptor(const SessionMessageCB& session_message_cb,
+AesDecryptor::AesDecryptor(const GURL& /* security_origin */,
+ const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const SessionKeysChangeCB& session_keys_change_cb)
: session_message_cb_(session_message_cb),
session_closed_cb_(session_closed_cb),
session_keys_change_cb_(session_keys_change_cb) {
+ // AesDecryptor doesn't keep any persistent data, so no need to do anything
+ // with |security_origin|.
DCHECK(!session_message_cb_.is_null());
DCHECK(!session_closed_cb_.is_null());
DCHECK(!session_keys_change_cb_.is_null());
@@ -238,80 +243,118 @@ AesDecryptor::~AesDecryptor() {
key_map_.clear();
}
-void AesDecryptor::SetServerCertificate(const uint8* certificate_data,
- int certificate_data_length,
+void AesDecryptor::SetServerCertificate(const std::vector<uint8_t>& certificate,
scoped_ptr<SimpleCdmPromise> promise) {
promise->reject(
NOT_SUPPORTED_ERROR, 0, "SetServerCertificate() is not supported.");
}
-void AesDecryptor::CreateSession(const std::string& init_data_type,
- const uint8* init_data,
- int init_data_length,
- SessionType session_type,
- scoped_ptr<NewSessionCdmPromise> promise) {
- std::string web_session_id(base::UintToString(next_web_session_id_++));
- valid_sessions_.insert(web_session_id);
-
- // For now, the AesDecryptor does not care about |init_data_type| or
- // |session_type|; just resolve the promise and then fire a message event
- // using the |init_data| as the key ID in the license request.
- // TODO(jrummell): Validate |init_data_type| and |session_type|.
- std::vector<uint8> message;
- if (init_data && init_data_length)
- CreateLicenseRequest(init_data, init_data_length, session_type, &message);
+void AesDecryptor::CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ std::string session_id(base::UintToString(next_session_id_++));
+ valid_sessions_.insert(session_id);
+
+ // For now, the AesDecryptor does not care about |session_type|.
+ // TODO(jrummell): Validate |session_type|.
+
+ std::vector<uint8_t> message;
+ // TODO(jrummell): Since unprefixed will never send NULL, remove this check
+ // when prefixed EME is removed (http://crbug.com/249976).
+ if (!init_data.empty()) {
+ std::vector<std::vector<uint8_t>> keys;
+ switch (init_data_type) {
+ case EmeInitDataType::WEBM:
+ // |init_data| is simply the key needed.
+ keys.push_back(init_data);
+ break;
+ case EmeInitDataType::CENC:
+ // |init_data| is a set of 0 or more concatenated 'pssh' boxes.
+ if (!GetKeyIdsForCommonSystemId(init_data, &keys)) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0,
+ "No supported PSSH box found.");
+ return;
+ }
+ break;
+ case EmeInitDataType::KEYIDS: {
+ std::string init_data_string(init_data.begin(), init_data.end());
+ std::string error_message;
+ if (!ExtractKeyIdsFromKeyIdsInitData(init_data_string, &keys,
+ &error_message)) {
+ promise->reject(NOT_SUPPORTED_ERROR, 0, error_message);
+ return;
+ }
+ break;
+ }
+ default:
+ NOTREACHED();
+ promise->reject(NOT_SUPPORTED_ERROR, 0,
+ "init_data_type not supported.");
+ return;
+ }
+ CreateLicenseRequest(keys, session_type, &message);
+ }
- promise->resolve(web_session_id);
+ promise->resolve(session_id);
- session_message_cb_.Run(web_session_id, message, GURL());
+ // No URL needed for license requests.
+ session_message_cb_.Run(session_id, LICENSE_REQUEST, message,
+ GURL::EmptyGURL());
}
-void AesDecryptor::LoadSession(const std::string& web_session_id,
+void AesDecryptor::LoadSession(SessionType session_type,
+ const std::string& session_id,
scoped_ptr<NewSessionCdmPromise> promise) {
// TODO(xhwang): Change this to NOTREACHED() when blink checks for key systems
// that do not support loadSession. See http://crbug.com/342481
promise->reject(NOT_SUPPORTED_ERROR, 0, "LoadSession() is not supported.");
}
-void AesDecryptor::UpdateSession(const std::string& web_session_id,
- const uint8* response,
- int response_length,
+void AesDecryptor::UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
scoped_ptr<SimpleCdmPromise> promise) {
- CHECK(response);
- CHECK_GT(response_length, 0);
+ CHECK(!response.empty());
// TODO(jrummell): Convert back to a DCHECK once prefixed EME is removed.
- if (valid_sessions_.find(web_session_id) == valid_sessions_.end()) {
+ if (valid_sessions_.find(session_id) == valid_sessions_.end()) {
promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
return;
}
- std::string key_string(reinterpret_cast<const char*>(response),
- response_length);
+ std::string key_string(response.begin(), response.end());
KeyIdAndKeyPairs keys;
SessionType session_type = MediaKeys::TEMPORARY_SESSION;
if (!ExtractKeysFromJWKSet(key_string, &keys, &session_type)) {
promise->reject(
- INVALID_ACCESS_ERROR, 0, "response is not a valid JSON Web Key Set.");
+ INVALID_ACCESS_ERROR, 0, "Response is not a valid JSON Web Key Set.");
return;
}
// Make sure that at least one key was extracted.
if (keys.empty()) {
promise->reject(
- INVALID_ACCESS_ERROR, 0, "response does not contain any keys.");
+ INVALID_ACCESS_ERROR, 0, "Response does not contain any keys.");
return;
}
+ bool key_added = false;
for (KeyIdAndKeyPairs::iterator it = keys.begin(); it != keys.end(); ++it) {
if (it->second.length() !=
static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
- DVLOG(1) << "Invalid key length: " << key_string.length();
+ DVLOG(1) << "Invalid key length: " << it->second.length();
promise->reject(INVALID_ACCESS_ERROR, 0, "Invalid key length.");
return;
}
- if (!AddDecryptionKey(web_session_id, it->first, it->second)) {
+
+ // If this key_id doesn't currently exist in this session,
+ // a new key is added.
+ if (!HasKey(session_id, it->first))
+ key_added = true;
+
+ if (!AddDecryptionKey(session_id, it->first, it->second)) {
promise->reject(INVALID_ACCESS_ERROR, 0, "Unable to add key.");
return;
}
@@ -329,26 +372,39 @@ void AesDecryptor::UpdateSession(const std::string& web_session_id,
promise->resolve();
- // Assume that at least 1 new key has been successfully added and thus
- // sending true.
- session_keys_change_cb_.Run(web_session_id, true);
+ // Create the list of all available keys for this session.
+ CdmKeysInfo keys_info;
+ {
+ base::AutoLock auto_lock(key_map_lock_);
+ for (const auto& item : key_map_) {
+ if (item.second->Contains(session_id)) {
+ scoped_ptr<CdmKeyInformation> key_info(new CdmKeyInformation);
+ key_info->key_id.assign(item.first.begin(), item.first.end());
+ key_info->status = CdmKeyInformation::USABLE;
+ key_info->system_code = 0;
+ keys_info.push_back(key_info.release());
+ }
+ }
+ }
+
+ session_keys_change_cb_.Run(session_id, key_added, keys_info.Pass());
}
-void AesDecryptor::CloseSession(const std::string& web_session_id,
+void AesDecryptor::CloseSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) {
// Validate that this is a reference to an active session and then forget it.
- std::set<std::string>::iterator it = valid_sessions_.find(web_session_id);
+ std::set<std::string>::iterator it = valid_sessions_.find(session_id);
DCHECK(it != valid_sessions_.end());
valid_sessions_.erase(it);
// Close the session.
- DeleteKeysForSession(web_session_id);
+ DeleteKeysForSession(session_id);
promise->resolve();
- session_closed_cb_.Run(web_session_id);
+ session_closed_cb_.Run(session_id);
}
-void AesDecryptor::RemoveSession(const std::string& web_session_id,
+void AesDecryptor::RemoveSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) {
// AesDecryptor doesn't keep any persistent data, so this should be
// NOT_REACHED().
@@ -359,40 +415,25 @@ void AesDecryptor::RemoveSession(const std::string& web_session_id,
// session, if it exists.
// TODO(jrummell): Remove the close() call when prefixed EME is removed.
// http://crbug.com/249976.
- if (valid_sessions_.find(web_session_id) != valid_sessions_.end()) {
- CloseSession(web_session_id, promise.Pass());
+ if (valid_sessions_.find(session_id) != valid_sessions_.end()) {
+ CloseSession(session_id, promise.Pass());
return;
}
promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
}
-void AesDecryptor::GetUsableKeyIds(const std::string& web_session_id,
- scoped_ptr<KeyIdsPromise> promise) {
- // Since |web_session_id| is not provided by the user, this should never
- // happen.
- DCHECK(valid_sessions_.find(web_session_id) != valid_sessions_.end());
-
- KeyIdsVector keyids;
- base::AutoLock auto_lock(key_map_lock_);
- for (KeyIdToSessionKeysMap::iterator it = key_map_.begin();
- it != key_map_.end();
- ++it) {
- if (it->second->Contains(web_session_id))
- keyids.push_back(std::vector<uint8>(it->first.begin(), it->first.end()));
- }
- promise->resolve(keyids);
+CdmContext* AesDecryptor::GetCdmContext() {
+ return this;
}
Decryptor* AesDecryptor::GetDecryptor() {
return this;
}
-#if defined(ENABLE_BROWSER_CDMS)
int AesDecryptor::GetCdmId() const {
return kInvalidCdmId;
}
-#endif // defined(ENABLE_BROWSER_CDMS)
void AesDecryptor::RegisterNewKeyCB(StreamType stream_type,
const NewKeyCB& new_key_cb) {
@@ -479,7 +520,7 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
NOTREACHED() << "AesDecryptor does not support audio/video decoding";
}
-bool AesDecryptor::AddDecryptionKey(const std::string& web_session_id,
+bool AesDecryptor::AddDecryptionKey(const std::string& session_id,
const std::string& key_id,
const std::string& key_string) {
scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
@@ -491,14 +532,14 @@ bool AesDecryptor::AddDecryptionKey(const std::string& web_session_id,
base::AutoLock auto_lock(key_map_lock_);
KeyIdToSessionKeysMap::iterator key_id_entry = key_map_.find(key_id);
if (key_id_entry != key_map_.end()) {
- key_id_entry->second->Insert(web_session_id, decryption_key.Pass());
+ key_id_entry->second->Insert(session_id, decryption_key.Pass());
return true;
}
// |key_id| not found, so need to create new entry.
scoped_ptr<SessionIdDecryptionKeyMap> inner_map(
new SessionIdDecryptionKeyMap());
- inner_map->Insert(web_session_id, decryption_key.Pass());
+ inner_map->Insert(session_id, decryption_key.Pass());
key_map_.add(key_id, inner_map.Pass());
return true;
}
@@ -514,15 +555,25 @@ AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
return key_id_found->second->LatestDecryptionKey();
}
-void AesDecryptor::DeleteKeysForSession(const std::string& web_session_id) {
+bool AesDecryptor::HasKey(const std::string& session_id,
+ const std::string& key_id) {
+ base::AutoLock auto_lock(key_map_lock_);
+ KeyIdToSessionKeysMap::const_iterator key_id_found = key_map_.find(key_id);
+ if (key_id_found == key_map_.end())
+ return false;
+
+ return key_id_found->second->Contains(session_id);
+}
+
+void AesDecryptor::DeleteKeysForSession(const std::string& session_id) {
base::AutoLock auto_lock(key_map_lock_);
- // Remove all keys associated with |web_session_id|. Since the data is
+ // Remove all keys associated with |session_id|. Since the data is
// optimized for access in GetKey(), we need to look at each entry in
// |key_map_|.
KeyIdToSessionKeysMap::iterator it = key_map_.begin();
while (it != key_map_.end()) {
- it->second->Erase(web_session_id);
+ it->second->Erase(session_id);
if (it->second->Empty()) {
// Need to get rid of the entry for this key_id. This will mess up the
// iterator, so we need to increment it first.
diff --git a/chromium/media/cdm/aes_decryptor.h b/chromium/media/cdm/aes_decryptor.h
index edae707b73a..278be9534b8 100644
--- a/chromium/media/cdm/aes_decryptor.h
+++ b/chromium/media/cdm/aes_decryptor.h
@@ -7,16 +7,20 @@
#include <set>
#include <string>
+#include <vector>
#include "base/basictypes.h"
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
+#include "media/base/cdm_context.h"
#include "media/base/decryptor.h"
#include "media/base/media_export.h"
#include "media/base/media_keys.h"
+class GURL;
+
namespace crypto {
class SymmetricKey;
}
@@ -25,38 +29,39 @@ namespace media {
// Decrypts an AES encrypted buffer into an unencrypted buffer. The AES
// encryption must be CTR with a key size of 128bits.
-class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
+class MEDIA_EXPORT AesDecryptor : public MediaKeys,
+ public CdmContext,
+ public Decryptor {
public:
- AesDecryptor(const SessionMessageCB& session_message_cb,
+ AesDecryptor(const GURL& security_origin,
+ const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const SessionKeysChangeCB& session_keys_change_cb);
~AesDecryptor() override;
// MediaKeys implementation.
- void SetServerCertificate(const uint8* certificate_data,
- int certificate_data_length,
+ void SetServerCertificate(const std::vector<uint8_t>& certificate,
scoped_ptr<SimpleCdmPromise> promise) override;
- void CreateSession(const std::string& init_data_type,
- const uint8* init_data,
- int init_data_length,
- SessionType session_type,
- scoped_ptr<NewSessionCdmPromise> promise) override;
- void LoadSession(const std::string& web_session_id,
+ void CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise) override;
+ void LoadSession(SessionType session_type,
+ const std::string& session_id,
scoped_ptr<NewSessionCdmPromise> promise) override;
- void UpdateSession(const std::string& web_session_id,
- const uint8* response,
- int response_length,
+ void UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
scoped_ptr<SimpleCdmPromise> promise) override;
- void CloseSession(const std::string& web_session_id,
+ void CloseSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) override;
- void RemoveSession(const std::string& web_session_id,
+ void RemoveSession(const std::string& session_id,
scoped_ptr<SimpleCdmPromise> promise) override;
- void GetUsableKeyIds(const std::string& web_session_id,
- scoped_ptr<KeyIdsPromise> promise) override;
+ CdmContext* GetCdmContext() override;
+
+ // CdmContext implementation.
Decryptor* GetDecryptor() override;
-#if defined(ENABLE_BROWSER_CDMS)
- virtual int GetCdmId() const override;
-#endif // defined(ENABLE_BROWSER_CDMS)
+ int GetCdmId() const override;
// Decryptor implementation.
void RegisterNewKeyCB(StreamType stream_type,
@@ -107,12 +112,13 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
class SessionIdDecryptionKeyMap;
// Key ID <-> SessionIdDecryptionKeyMap map.
- typedef base::ScopedPtrHashMap<std::string, SessionIdDecryptionKeyMap>
+ typedef base::ScopedPtrHashMap<std::string,
+ scoped_ptr<SessionIdDecryptionKeyMap>>
KeyIdToSessionKeysMap;
// Creates a DecryptionKey using |key_string| and associates it with |key_id|.
// Returns true if successful.
- bool AddDecryptionKey(const std::string& web_session_id,
+ bool AddDecryptionKey(const std::string& session_id,
const std::string& key_id,
const std::string& key_string);
@@ -120,8 +126,11 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
// the key. Returns NULL if no key is associated with |key_id|.
DecryptionKey* GetKey(const std::string& key_id) const;
- // Deletes all keys associated with |web_session_id|.
- void DeleteKeysForSession(const std::string& web_session_id);
+ // Determines if |key_id| is already specified for |session_id|.
+ bool HasKey(const std::string& session_id, const std::string& key_id);
+
+ // Deletes all keys associated with |session_id|.
+ void DeleteKeysForSession(const std::string& session_id);
// Callbacks for firing session events.
SessionMessageCB session_message_cb_;
@@ -137,9 +146,9 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
// Keeps track of current valid sessions.
std::set<std::string> valid_sessions_;
- // Make web session ID unique per renderer by making it static. Web session
+ // Make session ID unique per renderer by making it static. Session
// IDs seen by the app will be "1", "2", etc.
- static uint32 next_web_session_id_;
+ static uint32_t next_session_id_;
NewKeyCB new_audio_key_cb_;
NewKeyCB new_video_key_cb_;
diff --git a/chromium/media/cdm/aes_decryptor_unittest.cc b/chromium/media/cdm/aes_decryptor_unittest.cc
index 1a228d496e1..6fd8cec7873 100644
--- a/chromium/media/cdm/aes_decryptor_unittest.cc
+++ b/chromium/media/cdm/aes_decryptor_unittest.cc
@@ -10,12 +10,14 @@
#include "base/json/json_reader.h"
#include "base/values.h"
#include "media/base/cdm_callback_promise.h"
+#include "media/base/cdm_key_information.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/mock_filters.h"
#include "media/cdm/aes_decryptor.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
using ::testing::_;
using ::testing::Gt;
@@ -23,6 +25,7 @@ using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SaveArg;
using ::testing::StrNe;
+using ::testing::Unused;
MATCHER(IsEmpty, "") { return arg.empty(); }
MATCHER(IsNotEmpty, "") { return !arg.empty(); }
@@ -32,8 +35,6 @@ MATCHER(IsJSONDictionary, "") {
return (root.get() && root->GetType() == base::Value::TYPE_DICTIONARY);
}
-class GURL;
-
namespace media {
const uint8 kOriginalData[] = "Original subsample data.";
@@ -55,6 +56,7 @@ const char kKeyAsJWK[] =
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
@@ -68,6 +70,7 @@ const char kKeyAlternateAsJWK[] =
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw\","
" \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" }"
@@ -79,6 +82,7 @@ const char kWrongKeyAsJWK[] =
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw\","
" \"k\": \"7u7u7u7u7u7u7u7u7u7u7g\""
" }"
@@ -90,6 +94,7 @@ const char kWrongSizedKeyAsJWK[] =
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw\","
" \"k\": \"AAECAw\""
" }"
@@ -136,6 +141,7 @@ const char kKey2AsJWK[] =
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
" \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" }"
@@ -208,7 +214,8 @@ enum PromiseResult { RESOLVED, REJECTED };
class AesDecryptorTest : public testing::Test {
public:
AesDecryptorTest()
- : decryptor_(base::Bind(&AesDecryptorTest::OnSessionMessage,
+ : decryptor_(GURL::EmptyGURL(),
+ base::Bind(&AesDecryptorTest::OnSessionMessage,
base::Unretained(this)),
base::Bind(&AesDecryptorTest::OnSessionClosed,
base::Unretained(this)),
@@ -231,29 +238,22 @@ class AesDecryptorTest : public testing::Test {
protected:
void OnResolveWithSession(PromiseResult expected_result,
- const std::string& web_session_id) {
+ const std::string& session_id) {
EXPECT_EQ(expected_result, RESOLVED) << "Unexpectedly resolved.";
- EXPECT_GT(web_session_id.length(), 0ul);
- web_session_id_ = web_session_id;
+ EXPECT_GT(session_id.length(), 0ul);
+ session_id_ = session_id;
}
void OnResolve(PromiseResult expected_result) {
EXPECT_EQ(expected_result, RESOLVED) << "Unexpectedly resolved.";
}
- void OnResolveWithUsableKeyIds(PromiseResult expected_result,
- uint32 expected_count,
- const KeyIdsVector& useable_key_ids) {
- EXPECT_EQ(expected_result, RESOLVED) << "Unexpectedly resolved.";
- EXPECT_EQ(expected_count, useable_key_ids.size());
- useable_key_ids_ = useable_key_ids;
- }
-
void OnReject(PromiseResult expected_result,
MediaKeys::Exception exception_code,
uint32 system_code,
const std::string& error_message) {
- EXPECT_EQ(expected_result, REJECTED) << "Unexpectedly rejected.";
+ EXPECT_EQ(expected_result, REJECTED)
+ << "Unexpectedly rejected with message: " << error_message;
}
scoped_ptr<SimpleCdmPromise> CreatePromise(PromiseResult expected_result) {
@@ -280,34 +280,17 @@ class AesDecryptorTest : public testing::Test {
return promise.Pass();
}
- scoped_ptr<KeyIdsPromise> CreateUsableKeyIdsPromise(
- PromiseResult expected_result,
- uint32 expected_count) {
- scoped_ptr<KeyIdsPromise> promise(new CdmCallbackPromise<KeyIdsVector>(
- base::Bind(&AesDecryptorTest::OnResolveWithUsableKeyIds,
- base::Unretained(this),
- expected_result,
- expected_count),
- base::Bind(&AesDecryptorTest::OnReject,
- base::Unretained(this),
- expected_result)));
- return promise.Pass();
- }
-
// Creates a new session using |key_id|. Returns the session ID.
std::string CreateSession(const std::vector<uint8>& key_id) {
DCHECK(!key_id.empty());
- EXPECT_CALL(*this,
- OnSessionMessage(
- IsNotEmpty(), IsJSONDictionary(), GURL::EmptyGURL()));
- decryptor_.CreateSession(std::string(),
- &key_id[0],
- key_id.size(),
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
+ EXPECT_CALL(*this, OnSessionMessage(IsNotEmpty(), _, IsJSONDictionary(),
+ GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(MediaKeys::TEMPORARY_SESSION,
+ EmeInitDataType::WEBM, key_id,
+ CreateSessionPromise(RESOLVED));
// This expects the promise to be called synchronously, which is the case
// for AesDecryptor.
- return web_session_id_;
+ return session_id_;
}
// Closes the session specified by |session_id|.
@@ -325,37 +308,40 @@ class AesDecryptorTest : public testing::Test {
decryptor_.RemoveSession(session_id, CreatePromise(RESOLVED));
}
+ MOCK_METHOD2(OnSessionKeysChangeCalled,
+ void(const std::string& session_id,
+ bool has_additional_usable_key));
+
+ void OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ keys_info_.swap(keys_info);
+ OnSessionKeysChangeCalled(session_id, has_additional_usable_key);
+ }
+
// Updates the session specified by |session_id| with |key|. |result|
// tests that the update succeeds or generates an error.
void UpdateSessionAndExpect(std::string session_id,
const std::string& key,
- PromiseResult expected_result) {
+ PromiseResult expected_result,
+ bool new_key_expected) {
DCHECK(!key.empty());
if (expected_result == RESOLVED) {
- EXPECT_CALL(*this, OnSessionKeysChange(session_id, true));
+ EXPECT_CALL(*this,
+ OnSessionKeysChangeCalled(session_id, new_key_expected));
} else {
- EXPECT_CALL(*this, OnSessionKeysChange(_, _)).Times(0);
+ EXPECT_CALL(*this, OnSessionKeysChangeCalled(_, _)).Times(0);
}
decryptor_.UpdateSession(session_id,
- reinterpret_cast<const uint8*>(key.c_str()),
- key.length(),
+ std::vector<uint8>(key.begin(), key.end()),
CreatePromise(expected_result));
}
- void GetUsableKeyIdsAndExpect(const std::string& session_id,
- PromiseResult expected_result,
- uint32 expected_count) {
- decryptor_.GetUsableKeyIds(
- session_id, CreateUsableKeyIdsPromise(expected_result, expected_count));
- }
-
- bool UsableKeyIdsContains(std::vector<uint8> expected) {
- for (KeyIdsVector::iterator it = useable_key_ids_.begin();
- it != useable_key_ids_.end();
- ++it) {
- if (*it == expected)
+ bool KeysInfoContains(std::vector<uint8> expected) {
+ for (const auto& key_id : keys_info_) {
+ if (key_id->key_id == expected)
return true;
}
return false;
@@ -420,22 +406,17 @@ class AesDecryptorTest : public testing::Test {
}
}
- MOCK_METHOD3(OnSessionMessage,
- void(const std::string& web_session_id,
+ MOCK_METHOD4(OnSessionMessage,
+ void(const std::string& session_id,
+ MediaKeys::MessageType message_type,
const std::vector<uint8>& message,
- const GURL& destination_url));
- MOCK_METHOD2(OnSessionKeysChange,
- void(const std::string& web_session_id,
- bool has_additional_usable_key));
- MOCK_METHOD1(OnSessionClosed, void(const std::string& web_session_id));
+ const GURL& legacy_destination_url));
+ MOCK_METHOD1(OnSessionClosed, void(const std::string& session_id));
AesDecryptor decryptor_;
AesDecryptor::DecryptCB decrypt_cb_;
- std::string web_session_id_;
-
- // Copy of the vector from the last successful call to
- // OnResolveWithUsableKeyIds().
- KeyIdsVector useable_key_ids_;
+ std::string session_id_;
+ CdmKeysInfo keys_info_;
// Constants for testing.
const std::vector<uint8> original_data_;
@@ -449,43 +430,70 @@ class AesDecryptorTest : public testing::Test {
TEST_F(AesDecryptorTest, CreateSessionWithNullInitData) {
EXPECT_CALL(*this,
- OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
- decryptor_.CreateSession(std::string(),
- NULL,
- 0,
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
+ OnSessionMessage(IsNotEmpty(), _, IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::WEBM, std::vector<uint8>(),
+ CreateSessionPromise(RESOLVED));
}
TEST_F(AesDecryptorTest, MultipleCreateSession) {
EXPECT_CALL(*this,
- OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
- decryptor_.CreateSession(std::string(),
- NULL,
- 0,
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
+ OnSessionMessage(IsNotEmpty(), _, IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::WEBM, std::vector<uint8>(),
+ CreateSessionPromise(RESOLVED));
EXPECT_CALL(*this,
- OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
- decryptor_.CreateSession(std::string(),
- NULL,
- 0,
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
+ OnSessionMessage(IsNotEmpty(), _, IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::WEBM, std::vector<uint8>(),
+ CreateSessionPromise(RESOLVED));
EXPECT_CALL(*this,
- OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
- decryptor_.CreateSession(std::string(),
- NULL,
- 0,
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
+ OnSessionMessage(IsNotEmpty(), _, IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::WEBM, std::vector<uint8>(),
+ CreateSessionPromise(RESOLVED));
+}
+
+TEST_F(AesDecryptorTest, CreateSessionWithCencInitData) {
+ const uint8 init_data[] = {
+ 0x00, 0x00, 0x00, 0x44, // size = 68
+ 0x70, 0x73, 0x73, 0x68, // 'pssh'
+ 0x01, // version
+ 0x00, 0x00, 0x00, // flags
+ 0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02, // SystemID
+ 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B,
+ 0x00, 0x00, 0x00, 0x02, // key count
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03, // key1
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04, // key2
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+ 0x00, 0x00, 0x00, 0x00 // datasize
+ };
+ EXPECT_CALL(*this, OnSessionMessage(IsNotEmpty(), _, IsJSONDictionary(),
+ GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::CENC,
+ std::vector<uint8>(init_data, init_data + arraysize(init_data)),
+ CreateSessionPromise(RESOLVED));
+}
+
+TEST_F(AesDecryptorTest, CreateSessionWithKeyIdsInitData) {
+ const char init_data[] =
+ "{\"kids\":[\"AQI\",\"AQIDBA\",\"AQIDBAUGBwgJCgsMDQ4PEA\"]}";
+
+ EXPECT_CALL(*this, OnSessionMessage(IsNotEmpty(), _, IsJSONDictionary(),
+ GURL::EmptyGURL()));
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::KEYIDS,
+ std::vector<uint8>(init_data, init_data + arraysize(init_data) - 1),
+ CreateSessionPromise(RESOLVED));
}
TEST_F(AesDecryptorTest, NormalDecryption) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
@@ -500,7 +508,7 @@ TEST_F(AesDecryptorTest, UnencryptedFrame) {
TEST_F(AesDecryptorTest, WrongKey) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
@@ -518,29 +526,29 @@ TEST_F(AesDecryptorTest, KeyReplacement) {
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
encrypted_buffer, original_data_, DATA_MISMATCH));
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, false);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
}
TEST_F(AesDecryptorTest, WrongSizedKey) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kWrongSizedKeyAsJWK, REJECTED);
+ UpdateSessionAndExpect(session_id, kWrongSizedKeyAsJWK, REJECTED, true);
}
TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
- UpdateSessionAndExpect(session_id, kKey2AsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKey2AsJWK, RESOLVED, true);
// The first key is still available after we added a second key.
ASSERT_NO_FATAL_FAILURE(
@@ -562,7 +570,7 @@ TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
TEST_F(AesDecryptorTest, CorruptedIv) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<uint8> bad_iv = iv_;
bad_iv[1]++;
@@ -575,7 +583,7 @@ TEST_F(AesDecryptorTest, CorruptedIv) {
TEST_F(AesDecryptorTest, CorruptedData) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<uint8> bad_data = encrypted_data_;
bad_data[1]++;
@@ -587,7 +595,7 @@ TEST_F(AesDecryptorTest, CorruptedData) {
TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, std::vector<uint8>(), no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
@@ -595,7 +603,7 @@ TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
TEST_F(AesDecryptorTest, SubsampleDecryption) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
subsample_encrypted_data_, key_id_, iv_, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
@@ -606,7 +614,7 @@ TEST_F(AesDecryptorTest, SubsampleDecryption) {
// disallow such a configuration, it should be covered.
TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
subsample_encrypted_data_, key_id_, iv_, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
@@ -614,7 +622,7 @@ TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
TEST_F(AesDecryptorTest, SubsampleWrongSize) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<SubsampleEntry> subsample_entries_wrong_size(
kSubsampleEntriesWrongSize,
@@ -627,7 +635,7 @@ TEST_F(AesDecryptorTest, SubsampleWrongSize) {
TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
kSubsampleEntriesInvalidTotalSize,
@@ -643,7 +651,7 @@ TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
// No cypher bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
@@ -657,7 +665,7 @@ TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
// No clear bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
std::string session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
@@ -673,7 +681,7 @@ TEST_F(AesDecryptorTest, CloseSession) {
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
@@ -687,7 +695,7 @@ TEST_F(AesDecryptorTest, RemoveSession) {
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
@@ -699,7 +707,7 @@ TEST_F(AesDecryptorTest, NoKeyAfterCloseSession) {
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
@@ -714,13 +722,13 @@ TEST_F(AesDecryptorTest, LatestKeyUsed) {
encrypted_data_, key_id_, iv_, no_subsample_entries_);
// Add alternate key, buffer should not be decoded properly.
- UpdateSessionAndExpect(session_id1, kKeyAlternateAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id1, kKeyAlternateAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH));
// Create a second session with a correct key value for key_id_.
std::string session_id2 = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id2, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id2, kKeyAsJWK, RESOLVED, true);
// Should be able to decode with latest key.
ASSERT_NO_FATAL_FAILURE(
@@ -731,13 +739,13 @@ TEST_F(AesDecryptorTest, LatestKeyUsedAfterCloseSession) {
std::string session_id1 = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id1, kKeyAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id1, kKeyAsJWK, RESOLVED, true);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
// Create a second session with a different key value for key_id_.
std::string session_id2 = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id2, kKeyAlternateAsJWK, RESOLVED);
+ UpdateSessionAndExpect(session_id2, kKeyAlternateAsJWK, RESOLVED, true);
// Should not be able to decode with new key.
ASSERT_NO_FATAL_FAILURE(
@@ -756,10 +764,11 @@ TEST_F(AesDecryptorTest, JWKKey) {
const std::string kJwkSimple =
"{"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
" \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
"}";
- UpdateSessionAndExpect(session_id, kJwkSimple, REJECTED);
+ UpdateSessionAndExpect(session_id, kJwkSimple, REJECTED, true);
// Try a key list with multiple entries.
const std::string kJwksMultipleEntries =
@@ -767,48 +776,52 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
" \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" },"
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"JCUmJygpKissLS4vMA\","
- " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4_QA\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksMultipleEntries, RESOLVED);
+ UpdateSessionAndExpect(session_id, kJwksMultipleEntries, RESOLVED, true);
// Try a key with no spaces and some \n plus additional fields.
const std::string kJwksNoSpaces =
"\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
- "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
+ "\"kid\":\"AQIDBAUGBwgJCgsMCg4PAA\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
"\",\"foo\":\"bar\"}]}\n\n";
- UpdateSessionAndExpect(session_id, kJwksNoSpaces, RESOLVED);
+ UpdateSessionAndExpect(session_id, kJwksNoSpaces, RESOLVED, true);
// Try some non-ASCII characters.
- UpdateSessionAndExpect(
- session_id, "This is not ASCII due to \xff\xfe\xfd in it.", REJECTED);
+ UpdateSessionAndExpect(session_id,
+ "This is not ASCII due to \xff\xfe\xfd in it.",
+ REJECTED, true);
// Try a badly formatted key. Assume that the JSON parser is fully tested,
// so we won't try a lot of combinations. However, need a test to ensure
// that the code doesn't crash if invalid JSON received.
- UpdateSessionAndExpect(session_id, "This is not a JSON key.", REJECTED);
+ UpdateSessionAndExpect(session_id, "This is not a JSON key.", REJECTED, true);
// Try passing some valid JSON that is not a dictionary at the top level.
- UpdateSessionAndExpect(session_id, "40", REJECTED);
+ UpdateSessionAndExpect(session_id, "40", REJECTED, true);
// Try an empty dictionary.
- UpdateSessionAndExpect(session_id, "{ }", REJECTED);
+ UpdateSessionAndExpect(session_id, "{ }", REJECTED, true);
// Try an empty 'keys' dictionary.
- UpdateSessionAndExpect(session_id, "{ \"keys\": [] }", REJECTED);
+ UpdateSessionAndExpect(session_id, "{ \"keys\": [] }", REJECTED, true);
// Try with 'keys' not a dictionary.
- UpdateSessionAndExpect(session_id, "{ \"keys\":\"1\" }", REJECTED);
+ UpdateSessionAndExpect(session_id, "{ \"keys\":\"1\" }", REJECTED, true);
// Try with 'keys' a list of integers.
- UpdateSessionAndExpect(session_id, "{ \"keys\": [ 1, 2, 3 ] }", REJECTED);
+ UpdateSessionAndExpect(session_id, "{ \"keys\": [ 1, 2, 3 ] }", REJECTED,
+ true);
// Try padding(=) at end of 'k' base64 string.
const std::string kJwksWithPaddedKey =
@@ -816,12 +829,13 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithPaddedKey, REJECTED);
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKey, REJECTED, true);
// Try padding(=) at end of 'kid' base64 string.
const std::string kJwksWithPaddedKeyId =
@@ -829,12 +843,13 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"AAECAw==\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithPaddedKeyId, REJECTED);
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKeyId, REJECTED, true);
// Try a key with invalid base64 encoding.
const std::string kJwksWithInvalidBase64 =
@@ -842,12 +857,13 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"!@#$%^&*()\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithInvalidBase64, REJECTED);
+ UpdateSessionAndExpect(session_id, kJwksWithInvalidBase64, REJECTED, true);
// Try a 3-byte 'kid' where no base64 padding is required.
// |kJwksMultipleEntries| above has 2 'kid's that require 1 and 2 padding
@@ -857,12 +873,13 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"Kiss\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithNoPadding, RESOLVED);
+ UpdateSessionAndExpect(session_id, kJwksWithNoPadding, RESOLVED, true);
// Empty key id.
const std::string kJwksWithEmptyKeyId =
@@ -870,12 +887,13 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"keys\": ["
" {"
" \"kty\": \"oct\","
+ " \"alg\": \"A128KW\","
" \"kid\": \"\","
" \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithEmptyKeyId, REJECTED);
+ UpdateSessionAndExpect(session_id, kJwksWithEmptyKeyId, REJECTED, true);
CloseSession(session_id);
}
@@ -884,21 +902,37 @@ TEST_F(AesDecryptorTest, GetKeyIds) {
std::vector<uint8> key_id2(kKeyId2, kKeyId2 + arraysize(kKeyId2));
std::string session_id = CreateSession(key_id_);
- GetUsableKeyIdsAndExpect(session_id, RESOLVED, 0);
- EXPECT_FALSE(UsableKeyIdsContains(key_id1));
- EXPECT_FALSE(UsableKeyIdsContains(key_id2));
+ EXPECT_FALSE(KeysInfoContains(key_id1));
+ EXPECT_FALSE(KeysInfoContains(key_id2));
- // Add 1 key, verify ID is returned.
- UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
- GetUsableKeyIdsAndExpect(session_id, RESOLVED, 1);
- EXPECT_TRUE(UsableKeyIdsContains(key_id1));
- EXPECT_FALSE(UsableKeyIdsContains(key_id2));
+ // Add 1 key, verify it is returned.
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
+ EXPECT_TRUE(KeysInfoContains(key_id1));
+ EXPECT_FALSE(KeysInfoContains(key_id2));
// Add second key, verify both IDs returned.
- UpdateSessionAndExpect(session_id, kKey2AsJWK, RESOLVED);
- GetUsableKeyIdsAndExpect(session_id, RESOLVED, 2);
- EXPECT_TRUE(UsableKeyIdsContains(key_id1));
- EXPECT_TRUE(UsableKeyIdsContains(key_id2));
+ UpdateSessionAndExpect(session_id, kKey2AsJWK, RESOLVED, true);
+ EXPECT_TRUE(KeysInfoContains(key_id1));
+ EXPECT_TRUE(KeysInfoContains(key_id2));
+}
+
+TEST_F(AesDecryptorTest, NoKeysChangeForSameKey) {
+ std::vector<uint8> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+
+ std::string session_id = CreateSession(key_id_);
+ EXPECT_FALSE(KeysInfoContains(key_id));
+
+ // Add key, verify it is returned.
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, true);
+ EXPECT_TRUE(KeysInfoContains(key_id));
+
+ // Add key a second time.
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED, false);
+ EXPECT_TRUE(KeysInfoContains(key_id));
+
+ // Create a new session. Add key, should indicate key added for this session.
+ std::string session_id2 = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id2, kKeyAsJWK, RESOLVED, true);
}
} // namespace media
diff --git a/chromium/media/cdm/cenc_utils.cc b/chromium/media/cdm/cenc_utils.cc
new file mode 100644
index 00000000000..86779b21740
--- /dev/null
+++ b/chromium/media/cdm/cenc_utils.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/cenc_utils.h"
+
+#include "media/base/bit_reader.h"
+
+namespace media {
+
+// The initialization data for encrypted media files using the ISO Common
+// Encryption ('cenc') protection scheme may contain one or more protection
+// system specific header ('pssh') boxes.
+// ref: https://w3c.github.io/encrypted-media/cenc-format.html
+//
+// The format of a 'pssh' box is as follows:
+// unsigned int(32) size;
+// unsigned int(32) type = "pssh";
+// if (size==1) {
+// unsigned int(64) largesize;
+// } else if (size==0) {
+// -- box extends to end of file
+// }
+// unsigned int(8) version;
+// bit(24) flags;
+// unsigned int(8)[16] SystemID;
+// if (version > 0)
+// {
+// unsigned int(32) KID_count;
+// {
+// unsigned int(8)[16] KID;
+// } [KID_count]
+// }
+// unsigned int(32) DataSize;
+// unsigned int(8)[DataSize] Data;
+
+// Minimum size of a 'pssh' box includes all the required fields (size, type,
+// version, flags, SystemID, DataSize).
+const int kMinimumBoxSizeInBytes = 32;
+
+// SystemID for the Common System.
+// https://w3c.github.io/encrypted-media/cenc-format.html#common-system
+const uint8_t kCommonSystemId[] = { 0x10, 0x77, 0xef, 0xec,
+ 0xc0, 0xb2, 0x4d, 0x02,
+ 0xac, 0xe3, 0x3c, 0x1e,
+ 0x52, 0xe2, 0xfb, 0x4b };
+
+#define RCHECK(x) \
+ do { \
+ if (!(x)) \
+ return false; \
+ } while (0)
+
+// Helper function to read up to 32 bits from a bit stream.
+static uint32_t ReadBits(BitReader* reader, int num_bits) {
+ DCHECK_GE(reader->bits_available(), num_bits);
+ DCHECK((num_bits > 0) && (num_bits <= 32));
+ uint32_t value;
+ reader->ReadBits(num_bits, &value);
+ return value;
+}
+
+// Checks whether the next 16 bytes matches the Common SystemID.
+// Assumes |reader| has enough data.
+static bool IsCommonSystemID(BitReader* reader) {
+ for (uint32_t i = 0; i < arraysize(kCommonSystemId); ++i) {
+ if (ReadBits(reader, 8) != kCommonSystemId[i])
+ return false;
+ }
+ return true;
+}
+
+// Checks that |reader| contains a valid 'ppsh' box header. |reader| is updated
+// to point to the content immediately following the box header. Returns true
+// if the header looks valid and |reader| contains enough data for the size of
+// header. |size| is updated as the computed size of the box header. Otherwise
+// false is returned.
+static bool ValidBoxHeader(BitReader* reader, uint32* size) {
+ // Enough data for a miniumum size 'pssh' box?
+ uint32 available_bytes = reader->bits_available() / 8;
+ RCHECK(available_bytes >= kMinimumBoxSizeInBytes);
+
+ *size = ReadBits(reader, 32);
+
+ // Must be a 'pssh' box or else fail.
+ RCHECK(ReadBits(reader, 8) == 'p');
+ RCHECK(ReadBits(reader, 8) == 's');
+ RCHECK(ReadBits(reader, 8) == 's');
+ RCHECK(ReadBits(reader, 8) == 'h');
+
+ if (*size == 1) {
+ // If largesize > 2**32 it is too big.
+ RCHECK(ReadBits(reader, 32) == 0);
+ *size = ReadBits(reader, 32);
+ } else if (*size == 0) {
+ *size = available_bytes;
+ }
+
+ // Check that the buffer contains at least size bytes.
+ return available_bytes >= *size;
+}
+
+bool ValidatePsshInput(const std::vector<uint8_t>& input) {
+ size_t offset = 0;
+ while (offset < input.size()) {
+ // Create a BitReader over the remaining part of the buffer.
+ BitReader reader(&input[offset], input.size() - offset);
+ uint32 size;
+ RCHECK(ValidBoxHeader(&reader, &size));
+
+ // Update offset to point at the next 'pssh' box (may not be one).
+ offset += size;
+ }
+
+ // Only valid if this contains 0 or more 'pssh' boxes.
+ return offset == input.size();
+}
+
+bool GetKeyIdsForCommonSystemId(const std::vector<uint8_t>& input,
+ KeyIdList* key_ids) {
+ size_t offset = 0;
+ KeyIdList result;
+
+ while (offset < input.size()) {
+ BitReader reader(&input[offset], input.size() - offset);
+ uint32 size;
+ RCHECK(ValidBoxHeader(&reader, &size));
+
+ // Update offset to point at the next 'pssh' box (may not be one).
+ offset += size;
+
+ // Check the version, as KIDs only available if version > 0.
+ uint8_t version = ReadBits(&reader, 8);
+ if (version == 0)
+ continue;
+
+ // flags must be 0. If not, assume incorrect 'pssh' box and move to the
+ // next one.
+ if (ReadBits(&reader, 24) != 0)
+ continue;
+
+ // Validate SystemID
+ RCHECK(static_cast<uint32_t>(reader.bits_available()) >=
+ arraysize(kCommonSystemId) * 8);
+ if (!IsCommonSystemID(&reader))
+ continue; // Not Common System, so try the next pssh box.
+
+ // Since version > 0, next field is the KID_count.
+ RCHECK(static_cast<uint32_t>(reader.bits_available()) >=
+ sizeof(uint32_t) * 8);
+ uint32_t count = ReadBits(&reader, 32);
+
+ if (count == 0)
+ continue;
+
+ // Make sure there is enough data for all the KIDs specified, and then
+ // extract them.
+ RCHECK(static_cast<uint32_t>(reader.bits_available()) > count * 16 * 8);
+ while (count > 0) {
+ std::vector<uint8_t> key;
+ key.reserve(16);
+ for (int i = 0; i < 16; ++i) {
+ key.push_back(ReadBits(&reader, 8));
+ }
+ result.push_back(key);
+ --count;
+ }
+
+ // Don't bother checking DataSize and Data.
+ }
+
+ key_ids->swap(result);
+
+ // TODO(jrummell): This should return true only if there was at least one
+ // key ID present. However, numerous test files don't contain the 'pssh' box
+ // for Common Format, so no keys are found. http://crbug.com/460308
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/cenc_utils.h b/chromium/media/cdm/cenc_utils.h
new file mode 100644
index 00000000000..ec85fb34e8b
--- /dev/null
+++ b/chromium/media/cdm/cenc_utils.h
@@ -0,0 +1,33 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_CENC_UTILS_H_
+#define MEDIA_CDM_CENC_UTILS_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/cdm/json_web_key.h"
+
+namespace media {
+
+// Validate that |input| is a set of concatenated 'pssh' boxes and the sizes
+// match. Returns true if |input| looks valid, false otherwise.
+MEDIA_EXPORT bool ValidatePsshInput(const std::vector<uint8_t>& input);
+
+// Gets the Key Ids from a 'pssh' box for the Common SystemID among one or
+// more concatenated 'pssh' boxes. If |input| looks valid, then true is
+// returned and |key_ids| is updated to contain the values found. Otherwise
+// return false.
+// TODO(jrummell): This returns true if no Common SystemID 'pssh' boxes are
+// found, or are included but don't contain any key IDs. This should be
+// fixed once the test files are updated to include correct 'pssh' boxes.
+// http://crbug.com/460308
+MEDIA_EXPORT bool GetKeyIdsForCommonSystemId(const std::vector<uint8_t>& input,
+ KeyIdList* key_ids);
+
+} // namespace media
+
+#endif // MEDIA_CDM_CENC_UTILS_H_
diff --git a/chromium/media/cdm/cenc_utils_unittest.cc b/chromium/media/cdm/cenc_utils_unittest.cc
new file mode 100644
index 00000000000..992361976d9
--- /dev/null
+++ b/chromium/media/cdm/cenc_utils_unittest.cc
@@ -0,0 +1,376 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/cenc_utils.h"
+
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+const uint8_t kKey1Data[] = {
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03,
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03
+};
+const uint8_t kKey2Data[] = {
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+};
+const uint8_t kKey3Data[] = {
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x05,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x05,
+};
+const uint8_t kKey4Data[] = {
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x06,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x06,
+};
+
+class CencUtilsTest : public testing::Test {
+ public:
+ CencUtilsTest()
+ : key1_(kKey1Data, kKey1Data + arraysize(kKey1Data)),
+ key2_(kKey2Data, kKey2Data + arraysize(kKey2Data)),
+ key3_(kKey3Data, kKey3Data + arraysize(kKey3Data)),
+ key4_(kKey4Data, kKey4Data + arraysize(kKey4Data)) {}
+
+ protected:
+ // Initialize the start of the 'pssh' box (up to key_count)
+ void InitializePSSHBox(std::vector<uint8_t>* box,
+ uint8_t size,
+ uint8_t version) {
+ DCHECK(box->size() == 0);
+
+ box->reserve(size);
+ // Add size.
+ box->push_back(0);
+ box->push_back(0);
+ box->push_back(0);
+ box->push_back(size);
+ // Add 'pssh'.
+ box->push_back('p');
+ box->push_back('s');
+ box->push_back('s');
+ box->push_back('h');
+ // Add version.
+ box->push_back(version);
+ // Add flags.
+ box->push_back(0);
+ box->push_back(0);
+ box->push_back(0);
+ // Add Clear Key SystemID.
+ box->push_back(0x10);
+ box->push_back(0x77);
+ box->push_back(0xEF);
+ box->push_back(0xEC);
+ box->push_back(0xC0);
+ box->push_back(0xB2);
+ box->push_back(0x4D);
+ box->push_back(0x02);
+ box->push_back(0xAC);
+ box->push_back(0xE3);
+ box->push_back(0x3C);
+ box->push_back(0x1E);
+ box->push_back(0x52);
+ box->push_back(0xE2);
+ box->push_back(0xFB);
+ box->push_back(0x4B);
+ }
+
+ std::vector<uint8_t> MakePSSHBox(uint8_t version) {
+ std::vector<uint8_t> box;
+ uint8_t size = (version == 0) ? 32 : 36;
+ InitializePSSHBox(&box, size, version);
+ if (version > 0) {
+ // Add key_count (= 0).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ }
+ // Add data_size (= 0).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ return box;
+ }
+
+ std::vector<uint8_t> MakePSSHBox(uint8_t version,
+ const std::vector<uint8_t>& key1) {
+ DCHECK(version > 0);
+ DCHECK(key1.size() == 16);
+
+ std::vector<uint8_t> box;
+ uint8_t size = 52;
+ InitializePSSHBox(&box, size, version);
+
+ // Add key_count (= 1).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(1);
+
+ // Add key1.
+ for (int i = 0; i < 16; ++i)
+ box.push_back(key1[i]);
+
+ // Add data_size (= 0).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ return box;
+ }
+
+ std::vector<uint8_t> MakePSSHBox(uint8_t version,
+ const std::vector<uint8_t>& key1,
+ const std::vector<uint8_t>& key2) {
+ DCHECK(version > 0);
+ DCHECK(key1.size() == 16);
+ DCHECK(key2.size() == 16);
+
+ std::vector<uint8_t> box;
+ uint8_t size = 68;
+ InitializePSSHBox(&box, size, version);
+
+ // Add key_count (= 2).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(2);
+
+ // Add key1.
+ for (int i = 0; i < 16; ++i)
+ box.push_back(key1[i]);
+
+ // Add key2.
+ for (int i = 0; i < 16; ++i)
+ box.push_back(key2[i]);
+
+ // Add data_size (= 0).
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ box.push_back(0);
+ return box;
+ }
+
+ const std::vector<uint8_t>& Key1() { return key1_; }
+ const std::vector<uint8_t>& Key2() { return key2_; }
+ const std::vector<uint8_t>& Key3() { return key3_; }
+ const std::vector<uint8_t>& Key4() { return key4_; }
+
+ private:
+ std::vector<uint8_t> key1_;
+ std::vector<uint8_t> key2_;
+ std::vector<uint8_t> key3_;
+ std::vector<uint8_t> key4_;
+};
+
+TEST_F(CencUtilsTest, EmptyPSSH) {
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(std::vector<uint8_t>()));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(std::vector<uint8_t>(), &key_ids));
+ EXPECT_EQ(0u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion0) {
+ std::vector<uint8_t> box = MakePSSHBox(0);
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(0u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion1WithNoKeys) {
+ std::vector<uint8_t> box = MakePSSHBox(1);
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(0u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion1WithOneKey) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1());
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(1u, key_ids.size());
+ EXPECT_EQ(key_ids[0], Key1());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion1WithTwoKeys) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1(), Key2());
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(2u, key_ids.size());
+ EXPECT_EQ(key_ids[0], Key1());
+ EXPECT_EQ(key_ids[1], Key2());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion0Plus1) {
+ std::vector<uint8_t> box0 = MakePSSHBox(0);
+ std::vector<uint8_t> box1 = MakePSSHBox(1, Key1());
+
+ // Concatenate box1 into box0.
+ for (const auto& value : box1)
+ box0.push_back(value);
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box0));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box0, &key_ids));
+ EXPECT_EQ(1u, key_ids.size());
+ EXPECT_EQ(key_ids[0], Key1());
+}
+
+TEST_F(CencUtilsTest, PSSHVersion1Plus0) {
+ std::vector<uint8_t> box0 = MakePSSHBox(0);
+ std::vector<uint8_t> box1 = MakePSSHBox(1, Key1());
+
+ // Concatenate box0 into box1.
+ for (const auto& value : box0)
+ box1.push_back(value);
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box1));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box1, &key_ids));
+ EXPECT_EQ(1u, key_ids.size());
+ EXPECT_EQ(key_ids[0], Key1());
+}
+
+TEST_F(CencUtilsTest, MultiplePSSHVersion1) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1(), Key2());
+ std::vector<uint8_t> box1 = MakePSSHBox(1, Key3());
+ std::vector<uint8_t> box2 = MakePSSHBox(1, Key4());
+
+ // Concatenate box1 into box.
+ for (const auto& value : box1)
+ box.push_back(value);
+ // Concatenate box2 into box.
+ for (const auto& value : box2)
+ box.push_back(value);
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(ValidatePsshInput(box));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(4u, key_ids.size());
+ EXPECT_EQ(key_ids[0], Key1());
+ EXPECT_EQ(key_ids[1], Key2());
+ EXPECT_EQ(key_ids[2], Key3());
+ EXPECT_EQ(key_ids[3], Key4());
+}
+
+TEST_F(CencUtilsTest, InvalidPSSH) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1(), Key2());
+ KeyIdList key_ids;
+ for (uint32 i = 1; i < box.size(); ++i) {
+ // Modify size of data passed to be less than real size.
+ std::vector<uint8_t> truncated(&box[0], &box[0] + i);
+ EXPECT_FALSE(ValidatePsshInput(truncated));
+ EXPECT_FALSE(GetKeyIdsForCommonSystemId(truncated, &key_ids));
+ // Modify starting point.
+ std::vector<uint8_t> changed_offset(&box[i], &box[i] + box.size() - i);
+ EXPECT_FALSE(ValidatePsshInput(changed_offset));
+ EXPECT_FALSE(GetKeyIdsForCommonSystemId(changed_offset, &key_ids));
+ }
+}
+
+TEST_F(CencUtilsTest, InvalidSystemID) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1(), Key2());
+
+ // Modify the System ID.
+ ++box[20];
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(0u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, InvalidFlags) {
+ std::vector<uint8_t> box = MakePSSHBox(1, Key1(), Key2());
+
+ // Modify flags.
+ box[10] = 3;
+
+ KeyIdList key_ids;
+ // TODO(jrummell): This should fail as the 'pssh' box is skipped.
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(box, &key_ids));
+ EXPECT_EQ(0u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, LongSize) {
+ const uint8_t data[] = {
+ 0x00, 0x00, 0x00, 0x01, // size = 1
+ 0x70, 0x73, 0x73, 0x68, // 'pssh'
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, // longsize
+ 0x01, // version
+ 0x00, 0x00, 0x00, // flags
+ 0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02, // SystemID
+ 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B,
+ 0x00, 0x00, 0x00, 0x02, // key count
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03, // key1
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04, // key2
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+ 0x00, 0x00, 0x00, 0x00 // datasize
+ };
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(
+ ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(
+ std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+ EXPECT_EQ(2u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, NoSize) {
+ const uint8_t data[] = {
+ 0x00, 0x00, 0x00, 0x00, // size = 0
+ 0x70, 0x73, 0x73, 0x68, // 'pssh'
+ 0x01, // version
+ 0x00, 0x00, 0x00, // flags
+ 0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02, // SystemID
+ 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B,
+ 0x00, 0x00, 0x00, 0x02, // key count
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03, // key1
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04, // key2
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+ 0x00, 0x00, 0x00, 0x00 // datasize
+ };
+
+ KeyIdList key_ids;
+ EXPECT_TRUE(
+ ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ EXPECT_TRUE(GetKeyIdsForCommonSystemId(
+ std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+ EXPECT_EQ(2u, key_ids.size());
+}
+
+TEST_F(CencUtilsTest, HugeSize) {
+ const uint8_t data[] = {
+ 0x00, 0x00, 0x00, 0x01, // size = 1
+ 0x70, 0x73, 0x73, 0x68, // 'pssh'
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // longsize = big
+ 0x01, // version
+ 0x00, 0x00, 0x00, // flags
+ 0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02, // SystemID
+ 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B,
+ 0x00, 0x00, 0x00, 0x02, // key count
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03, // key1
+ 0x7E, 0x57, 0x1D, 0x03, 0x7E, 0x57, 0x1D, 0x03,
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04, // key2
+ 0x7E, 0x57, 0x1D, 0x04, 0x7E, 0x57, 0x1D, 0x04,
+ 0x00, 0x00, 0x00, 0x00 // datasize
+ };
+
+ KeyIdList key_ids;
+ EXPECT_FALSE(
+ ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ EXPECT_FALSE(GetKeyIdsForCommonSystemId(
+ std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/default_cdm_factory.cc b/chromium/media/cdm/default_cdm_factory.cc
new file mode 100644
index 00000000000..c025fe982db
--- /dev/null
+++ b/chromium/media/cdm/default_cdm_factory.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/default_cdm_factory.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+#include "media/base/key_systems.h"
+#include "media/cdm/aes_decryptor.h"
+#include "url/gurl.h"
+
+namespace media {
+
+DefaultCdmFactory::DefaultCdmFactory() {
+}
+
+DefaultCdmFactory::~DefaultCdmFactory() {
+}
+
+void DefaultCdmFactory::Create(
+ const std::string& key_system,
+ const GURL& security_origin,
+ const CdmConfig& cdm_config,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb,
+ const CdmCreatedCB& cdm_created_cb) {
+ if (!security_origin.is_valid()) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(cdm_created_cb, nullptr, "Invalid origin."));
+ return;
+ }
+ if (!CanUseAesDecryptor(key_system)) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(cdm_created_cb, nullptr, "Unsupported key system."));
+ return;
+ }
+
+ scoped_ptr<MediaKeys> cdm(
+ new AesDecryptor(security_origin, session_message_cb, session_closed_cb,
+ session_keys_change_cb));
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(cdm_created_cb, base::Passed(&cdm), ""));
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/default_cdm_factory.h b/chromium/media/cdm/default_cdm_factory.h
new file mode 100644
index 00000000000..78d4f2f50b0
--- /dev/null
+++ b/chromium/media/cdm/default_cdm_factory.h
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_DEFAULT_CDM_FACTORY_H_
+#define MEDIA_CDM_DEFAULT_CDM_FACTORY_H_
+
+#include "base/macros.h"
+#include "media/base/cdm_factory.h"
+
+namespace media {
+
+struct CdmConfig;
+
+class DefaultCdmFactory : public CdmFactory {
+ public:
+ DefaultCdmFactory();
+ ~DefaultCdmFactory() final;
+
+ // CdmFactory implementation.
+ void Create(const std::string& key_system,
+ const GURL& security_origin,
+ const CdmConfig& cdm_config,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb,
+ const CdmCreatedCB& cdm_created_cb) final;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DefaultCdmFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_DEFAULT_CDM_FACTORY_H_
diff --git a/chromium/media/cdm/json_web_key.cc b/chromium/media/cdm/json_web_key.cc
index 03a8a1a7055..0576f369009 100644
--- a/chromium/media/cdm/json_web_key.cc
+++ b/chromium/media/cdm/json_web_key.cc
@@ -10,6 +10,7 @@
#include "base/json/string_escape.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/values.h"
@@ -17,17 +18,23 @@ namespace media {
const char kKeysTag[] = "keys";
const char kKeyTypeTag[] = "kty";
-const char kSymmetricKeyValue[] = "oct";
+const char kKeyTypeOct[] = "oct"; // Octet sequence.
const char kKeyTag[] = "k";
const char kKeyIdTag[] = "kid";
const char kKeyIdsTag[] = "kids";
const char kBase64Padding = '=';
+const char kBase64Plus[] = "+";
+const char kBase64UrlPlusReplacement[] = "-";
+const char kBase64Slash[] = "/";
+const char kBase64UrlSlashReplacement[] = "_";
+const char kBase64UrlInvalid[] = "+/=";
const char kTypeTag[] = "type";
-const char kPersistentType[] = "persistent";
-const char kTemporaryType[] = "temporary";
+const char kTemporarySession[] = "temporary";
+const char kPersistentLicenseSession[] = "persistent-license";
+const char kPersistentReleaseMessageSession[] = "persistent-release-message";
-// Encodes |input| into a base64 string without padding.
-static std::string EncodeBase64(const uint8* input, int input_length) {
+// Encodes |input| into a base64url string without padding.
+static std::string EncodeBase64Url(const uint8* input, int input_length) {
std::string encoded_text;
base::Base64Encode(
std::string(reinterpret_cast<const char*>(input), input_length),
@@ -38,14 +45,23 @@ static std::string EncodeBase64(const uint8* input, int input_length) {
if (found != std::string::npos)
encoded_text.erase(found + 1);
+ // base64url encoding means the characters '-' and '_' must be used
+ // instead of '+' and '/', respectively.
+ base::ReplaceChars(encoded_text, kBase64Plus, kBase64UrlPlusReplacement,
+ &encoded_text);
+ base::ReplaceChars(encoded_text, kBase64Slash, kBase64UrlSlashReplacement,
+ &encoded_text);
+
return encoded_text;
}
-// Decodes an unpadded base64 string. Returns empty string on error.
-static std::string DecodeBase64(const std::string& encoded_text) {
- // EME spec doesn't allow padding characters.
- if (encoded_text.find_first_of(kBase64Padding) != std::string::npos)
+// Decodes a base64url string. Returns empty string on error.
+static std::string DecodeBase64Url(const std::string& encoded_text) {
+ // EME spec doesn't allow '+', '/', or padding characters.
+ if (encoded_text.find_first_of(kBase64UrlInvalid) != std::string::npos) {
+ DVLOG(1) << "Invalid base64url format: " << encoded_text;
return std::string();
+ }
// Since base::Base64Decode() requires padding characters, add them so length
// of |encoded_text| is exactly a multiple of 4.
@@ -54,28 +70,91 @@ static std::string DecodeBase64(const std::string& encoded_text) {
if (num_last_grouping_chars > 0)
modified_text.append(4 - num_last_grouping_chars, kBase64Padding);
+ // base64url encoding means the characters '-' and '_' must be used
+ // instead of '+' and '/', respectively, so replace them before calling
+ // base::Base64Decode().
+ base::ReplaceChars(modified_text, kBase64UrlPlusReplacement, kBase64Plus,
+ &modified_text);
+ base::ReplaceChars(modified_text, kBase64UrlSlashReplacement, kBase64Slash,
+ &modified_text);
+
std::string decoded_text;
- if (!base::Base64Decode(modified_text, &decoded_text))
+ if (!base::Base64Decode(modified_text, &decoded_text)) {
+ DVLOG(1) << "Base64 decoding failed on: " << modified_text;
return std::string();
+ }
return decoded_text;
}
+static std::string ShortenTo64Characters(const std::string& input) {
+ // Convert |input| into a string with escaped characters replacing any
+ // non-ASCII characters. Limiting |input| to the first 65 characters so
+ // we don't waste time converting a potentially long string and then
+ // throwing away the excess.
+ std::string escaped_str =
+ base::EscapeBytesAsInvalidJSONString(input.substr(0, 65), false);
+ if (escaped_str.length() <= 64u)
+ return escaped_str;
+
+ // This may end up truncating an escaped character, but the first part of
+ // the string should provide enough information.
+ return escaped_str.substr(0, 61).append("...");
+}
+
+static scoped_ptr<base::DictionaryValue> CreateJSONDictionary(
+ const uint8* key,
+ int key_length,
+ const uint8* key_id,
+ int key_id_length) {
+ scoped_ptr<base::DictionaryValue> jwk(new base::DictionaryValue());
+ jwk->SetString(kKeyTypeTag, kKeyTypeOct);
+ jwk->SetString(kKeyTag, EncodeBase64Url(key, key_length));
+ jwk->SetString(kKeyIdTag, EncodeBase64Url(key_id, key_id_length));
+ return jwk.Pass();
+}
+
std::string GenerateJWKSet(const uint8* key, int key_length,
const uint8* key_id, int key_id_length) {
- // Both |key| and |key_id| need to be base64 encoded strings in the JWK.
- std::string key_base64 = EncodeBase64(key, key_length);
- std::string key_id_base64 = EncodeBase64(key_id, key_id_length);
-
// Create the JWK, and wrap it into a JWK Set.
- scoped_ptr<base::DictionaryValue> jwk(new base::DictionaryValue());
- jwk->SetString(kKeyTypeTag, kSymmetricKeyValue);
- jwk->SetString(kKeyTag, key_base64);
- jwk->SetString(kKeyIdTag, key_id_base64);
scoped_ptr<base::ListValue> list(new base::ListValue());
- list->Append(jwk.release());
+ list->Append(
+ CreateJSONDictionary(key, key_length, key_id, key_id_length).release());
+ base::DictionaryValue jwk_set;
+ jwk_set.Set(kKeysTag, list.release());
+
+ // Finally serialize |jwk_set| into a string and return it.
+ std::string serialized_jwk;
+ JSONStringValueSerializer serializer(&serialized_jwk);
+ serializer.Serialize(jwk_set);
+ return serialized_jwk;
+}
+
+std::string GenerateJWKSet(const KeyIdAndKeyPairs& keys,
+ MediaKeys::SessionType session_type) {
+ scoped_ptr<base::ListValue> list(new base::ListValue());
+ for (const auto& key_pair : keys) {
+ list->Append(CreateJSONDictionary(
+ reinterpret_cast<const uint8*>(key_pair.second.data()),
+ key_pair.second.length(),
+ reinterpret_cast<const uint8*>(key_pair.first.data()),
+ key_pair.first.length())
+ .release());
+ }
+
base::DictionaryValue jwk_set;
jwk_set.Set(kKeysTag, list.release());
+ switch (session_type) {
+ case MediaKeys::TEMPORARY_SESSION:
+ jwk_set.SetString(kTypeTag, kTemporarySession);
+ break;
+ case MediaKeys::PERSISTENT_LICENSE_SESSION:
+ jwk_set.SetString(kTypeTag, kPersistentLicenseSession);
+ break;
+ case MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION:
+ jwk_set.SetString(kTypeTag, kPersistentReleaseMessageSession);
+ break;
+ }
// Finally serialize |jwk_set| into a string and return it.
std::string serialized_jwk;
@@ -88,10 +167,9 @@ std::string GenerateJWKSet(const uint8* key, int key_length,
// to the id/value pair and returns true on success.
static bool ConvertJwkToKeyPair(const base::DictionaryValue& jwk,
KeyIdAndKeyPair* jwk_key) {
- // Have found a JWK, start by checking that it is a symmetric key.
std::string type;
- if (!jwk.GetString(kKeyTypeTag, &type) || type != kSymmetricKeyValue) {
- DVLOG(1) << "JWK is not a symmetric key";
+ if (!jwk.GetString(kKeyTypeTag, &type) || type != kKeyTypeOct) {
+ DVLOG(1) << "Missing or invalid '" << kKeyTypeTag << "': " << type;
return false;
}
@@ -108,13 +186,13 @@ static bool ConvertJwkToKeyPair(const base::DictionaryValue& jwk,
}
// Key ID and key are base64-encoded strings, so decode them.
- std::string raw_key_id = DecodeBase64(encoded_key_id);
+ std::string raw_key_id = DecodeBase64Url(encoded_key_id);
if (raw_key_id.empty()) {
DVLOG(1) << "Invalid '" << kKeyIdTag << "' value: " << encoded_key_id;
return false;
}
- std::string raw_key = DecodeBase64(encoded_key);
+ std::string raw_key = DecodeBase64Url(encoded_key);
if (raw_key.empty()) {
DVLOG(1) << "Invalid '" << kKeyTag << "' value: " << encoded_key;
return false;
@@ -128,12 +206,16 @@ static bool ConvertJwkToKeyPair(const base::DictionaryValue& jwk,
bool ExtractKeysFromJWKSet(const std::string& jwk_set,
KeyIdAndKeyPairs* keys,
MediaKeys::SessionType* session_type) {
- if (!base::IsStringASCII(jwk_set))
+ if (!base::IsStringASCII(jwk_set)) {
+ DVLOG(1) << "Non ASCII JWK Set: " << jwk_set;
return false;
+ }
scoped_ptr<base::Value> root(base::JSONReader().ReadToValue(jwk_set));
- if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY)
+ if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY) {
+ DVLOG(1) << "Not valid JSON: " << jwk_set << ", root: " << root.get();
return false;
+ }
// Locate the set from the dictionary.
base::DictionaryValue* dictionary =
@@ -166,19 +248,21 @@ bool ExtractKeysFromJWKSet(const std::string& jwk_set,
// Successfully processed all JWKs in the set. Now check if "type" is
// specified.
base::Value* value = NULL;
- std::string type_id;
+ std::string session_type_id;
if (!dictionary->Get(kTypeTag, &value)) {
// Not specified, so use the default type.
*session_type = MediaKeys::TEMPORARY_SESSION;
- } else if (!value->GetAsString(&type_id)) {
+ } else if (!value->GetAsString(&session_type_id)) {
DVLOG(1) << "Invalid '" << kTypeTag << "' value";
return false;
- } else if (type_id == kPersistentType) {
- *session_type = MediaKeys::PERSISTENT_SESSION;
- } else if (type_id == kTemporaryType) {
+ } else if (session_type_id == kTemporarySession) {
*session_type = MediaKeys::TEMPORARY_SESSION;
+ } else if (session_type_id == kPersistentLicenseSession) {
+ *session_type = MediaKeys::PERSISTENT_LICENSE_SESSION;
+ } else if (session_type_id == kPersistentReleaseMessageSession) {
+ *session_type = MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION;
} else {
- DVLOG(1) << "Invalid '" << kTypeTag << "' value: " << type_id;
+ DVLOG(1) << "Invalid '" << kTypeTag << "' value: " << session_type_id;
return false;
}
@@ -187,22 +271,89 @@ bool ExtractKeysFromJWKSet(const std::string& jwk_set,
return true;
}
-void CreateLicenseRequest(const uint8* key_id,
- int key_id_length,
+bool ExtractKeyIdsFromKeyIdsInitData(const std::string& input,
+ KeyIdList* key_ids,
+ std::string* error_message) {
+ if (!base::IsStringASCII(input)) {
+ error_message->assign("Non ASCII: ");
+ error_message->append(ShortenTo64Characters(input));
+ return false;
+ }
+
+ scoped_ptr<base::Value> root(base::JSONReader().ReadToValue(input));
+ if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY) {
+ error_message->assign("Not valid JSON: ");
+ error_message->append(ShortenTo64Characters(input));
+ return false;
+ }
+
+ // Locate the set from the dictionary.
+ base::DictionaryValue* dictionary =
+ static_cast<base::DictionaryValue*>(root.get());
+ base::ListValue* list_val = NULL;
+ if (!dictionary->GetList(kKeyIdsTag, &list_val)) {
+ error_message->assign("Missing '");
+ error_message->append(kKeyIdsTag);
+ error_message->append("' parameter or not a list");
+ return false;
+ }
+
+ // Create a local list of key ids, so that |key_ids| only gets updated on
+ // success.
+ KeyIdList local_key_ids;
+ for (size_t i = 0; i < list_val->GetSize(); ++i) {
+ std::string encoded_key_id;
+ if (!list_val->GetString(i, &encoded_key_id)) {
+ error_message->assign("'");
+ error_message->append(kKeyIdsTag);
+ error_message->append("'[");
+ error_message->append(base::UintToString(i));
+ error_message->append("] is not string.");
+ return false;
+ }
+
+ // Key ID is a base64-encoded string, so decode it.
+ std::string raw_key_id = DecodeBase64Url(encoded_key_id);
+ if (raw_key_id.empty()) {
+ error_message->assign("'");
+ error_message->append(kKeyIdsTag);
+ error_message->append("'[");
+ error_message->append(base::UintToString(i));
+ error_message->append("] is not valid base64url encoded. Value: ");
+ error_message->append(ShortenTo64Characters(encoded_key_id));
+ return false;
+ }
+
+ // Add the decoded key ID to the list.
+ local_key_ids.push_back(std::vector<uint8>(
+ raw_key_id.data(), raw_key_id.data() + raw_key_id.length()));
+ }
+
+ // All done.
+ key_ids->swap(local_key_ids);
+ error_message->clear();
+ return true;
+}
+
+void CreateLicenseRequest(const KeyIdList& key_ids,
MediaKeys::SessionType session_type,
std::vector<uint8>* license) {
// Create the license request.
scoped_ptr<base::DictionaryValue> request(new base::DictionaryValue());
scoped_ptr<base::ListValue> list(new base::ListValue());
- list->AppendString(EncodeBase64(key_id, key_id_length));
+ for (const auto& key_id : key_ids)
+ list->AppendString(EncodeBase64Url(&key_id[0], key_id.size()));
request->Set(kKeyIdsTag, list.release());
switch (session_type) {
case MediaKeys::TEMPORARY_SESSION:
- request->SetString(kTypeTag, kTemporaryType);
+ request->SetString(kTypeTag, kTemporarySession);
+ break;
+ case MediaKeys::PERSISTENT_LICENSE_SESSION:
+ request->SetString(kTypeTag, kPersistentLicenseSession);
break;
- case MediaKeys::PERSISTENT_SESSION:
- request->SetString(kTypeTag, kPersistentType);
+ case MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION:
+ request->SetString(kTypeTag, kPersistentReleaseMessageSession);
break;
}
@@ -216,17 +367,40 @@ void CreateLicenseRequest(const uint8* key_id,
license->swap(result);
}
+void CreateKeyIdsInitData(const KeyIdList& key_ids,
+ std::vector<uint8>* init_data) {
+ // Create the init_data.
+ scoped_ptr<base::DictionaryValue> dictionary(new base::DictionaryValue());
+ scoped_ptr<base::ListValue> list(new base::ListValue());
+ for (const auto& key_id : key_ids)
+ list->AppendString(EncodeBase64Url(&key_id[0], key_id.size()));
+ dictionary->Set(kKeyIdsTag, list.release());
+
+ // Serialize the dictionary as a string.
+ std::string json;
+ JSONStringValueSerializer serializer(&json);
+ serializer.Serialize(*dictionary);
+
+ // Convert the serialized data into std::vector and return it.
+ std::vector<uint8> result(json.begin(), json.end());
+ init_data->swap(result);
+}
+
bool ExtractFirstKeyIdFromLicenseRequest(const std::vector<uint8>& license,
std::vector<uint8>* first_key) {
const std::string license_as_str(
reinterpret_cast<const char*>(!license.empty() ? &license[0] : NULL),
license.size());
- if (!base::IsStringASCII(license_as_str))
+ if (!base::IsStringASCII(license_as_str)) {
+ DVLOG(1) << "Non ASCII license: " << license_as_str;
return false;
+ }
scoped_ptr<base::Value> root(base::JSONReader().ReadToValue(license_as_str));
- if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY)
+ if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY) {
+ DVLOG(1) << "Not valid JSON: " << license_as_str;
return false;
+ }
// Locate the set from the dictionary.
base::DictionaryValue* dictionary =
@@ -249,7 +423,7 @@ bool ExtractFirstKeyIdFromLicenseRequest(const std::vector<uint8>& license,
return false;
}
- std::string decoded_string = DecodeBase64(encoded_key);
+ std::string decoded_string = DecodeBase64Url(encoded_key);
if (decoded_string.empty()) {
DVLOG(1) << "Invalid '" << kKeyIdsTag << "' value: " << encoded_key;
return false;
diff --git a/chromium/media/cdm/json_web_key.h b/chromium/media/cdm/json_web_key.h
index af028f2fe24..8502ad92936 100644
--- a/chromium/media/cdm/json_web_key.h
+++ b/chromium/media/cdm/json_web_key.h
@@ -48,6 +48,9 @@ namespace media {
// Ref: http://tools.ietf.org/html/draft-ietf-jose-json-web-key and:
// http://tools.ietf.org/html/draft-jones-jose-json-private-and-symmetric-key
+// Vector of key IDs.
+typedef std::vector<std::vector<uint8>> KeyIdList;
+
// Vector of [key_id, key_value] pairs. Values are raw binary data, stored in
// strings for convenience.
typedef std::pair<std::string, std::string> KeyIdAndKeyPair;
@@ -57,6 +60,10 @@ typedef std::vector<KeyIdAndKeyPair> KeyIdAndKeyPairs;
MEDIA_EXPORT std::string GenerateJWKSet(const uint8* key, int key_length,
const uint8* key_id, int key_id_length);
+// Converts a set of |key|, |key_id| pairs to a JSON Web Key Set.
+MEDIA_EXPORT std::string GenerateJWKSet(const KeyIdAndKeyPairs& keys,
+ MediaKeys::SessionType session_type);
+
// Extracts the JSON Web Keys from a JSON Web Key Set. If |input| looks like
// a valid JWK Set, then true is returned and |keys| and |session_type| are
// updated to contain the values found. Otherwise return false.
@@ -64,15 +71,25 @@ MEDIA_EXPORT bool ExtractKeysFromJWKSet(const std::string& jwk_set,
KeyIdAndKeyPairs* keys,
MediaKeys::SessionType* session_type);
-// Create a license request message for the |key_id| and |session_type|
-// specified. Currently ClearKey generates a message for each key individually,
-// so no need to take a list of |key_id|'s. |license| is updated to contain the
-// resulting JSON string.
-MEDIA_EXPORT void CreateLicenseRequest(const uint8* key_id,
- int key_id_length,
+// Extracts the Key Ids from a Key IDs Initialization Data
+// (https://w3c.github.io/encrypted-media/keyids-format.html). If |input| looks
+// valid, then true is returned and |key_ids| is updated to contain the values
+// found. Otherwise return false and |error_message| contains the reason.
+MEDIA_EXPORT bool ExtractKeyIdsFromKeyIdsInitData(const std::string& input,
+ KeyIdList* key_ids,
+ std::string* error_message);
+
+// Creates a license request message for the |key_ids| and |session_type|
+// specified. |license| is updated to contain the resulting JSON string.
+MEDIA_EXPORT void CreateLicenseRequest(const KeyIdList& key_ids,
MediaKeys::SessionType session_type,
std::vector<uint8>* license);
+// Creates a keyIDs init_data message for the |key_ids| specified.
+// |key_ids_init_data| is updated to contain the resulting JSON string.
+MEDIA_EXPORT void CreateKeyIdsInitData(const KeyIdList& key_ids,
+ std::vector<uint8>* key_ids_init_data);
+
// Extract the first key from the license request message. Returns true if
// |license| is a valid license request and contains at least one key,
// otherwise false and |first_key| is not touched.
diff --git a/chromium/media/cdm/json_web_key_unittest.cc b/chromium/media/cdm/json_web_key_unittest.cc
index 49ea32bd456..90d19e30f20 100644
--- a/chromium/media/cdm/json_web_key_unittest.cc
+++ b/chromium/media/cdm/json_web_key_unittest.cc
@@ -4,6 +4,7 @@
#include "media/cdm/json_web_key.h"
+#include "base/base64.h"
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,7 +45,9 @@ class JSONWebKeyTest : public testing::Test {
MediaKeys::SessionType session_type,
const std::string& expected_result) {
std::vector<uint8> result;
- CreateLicenseRequest(key_id, key_id_length, session_type, &result);
+ KeyIdList key_ids;
+ key_ids.push_back(std::vector<uint8>(key_id, key_id + key_id_length));
+ CreateLicenseRequest(key_ids, session_type, &result);
std::string s(result.begin(), result.end());
EXPECT_EQ(expected_result, s);
}
@@ -57,11 +60,24 @@ class JSONWebKeyTest : public testing::Test {
std::vector<uint8> key;
EXPECT_EQ(expected_result,
ExtractFirstKeyIdFromLicenseRequest(license_vector, &key));
- if (expected_result) {
- std::vector<uint8> key_result(expected_key,
- expected_key + expected_key_length);
- EXPECT_EQ(key_result, key);
- }
+ if (expected_result)
+ VerifyKeyId(key, expected_key, expected_key_length);
+ }
+
+ void VerifyKeyId(std::vector<uint8> key,
+ const uint8* expected_key,
+ int expected_key_length) {
+ std::vector<uint8> key_result(expected_key,
+ expected_key + expected_key_length);
+ EXPECT_EQ(key_result, key);
+ }
+
+ KeyIdAndKeyPair MakeKeyIdAndKeyPair(const uint8* key,
+ int key_length,
+ const uint8* key_id,
+ int key_id_length) {
+ return std::make_pair(std::string(key_id, key_id + key_id_length),
+ std::string(key, key + key_length));
}
};
@@ -84,17 +100,47 @@ TEST_F(JSONWebKeyTest, GenerateJWKSet) {
"{\"keys\":[{\"k\":\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kid\":"
"\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kty\":\"oct\"}]}",
GenerateJWKSet(data3, arraysize(data3), data3, arraysize(data3)));
+
+ KeyIdAndKeyPairs keys;
+ keys.push_back(
+ MakeKeyIdAndKeyPair(data1, arraysize(data1), data1, arraysize(data1)));
+ EXPECT_EQ(
+ "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
+ "\"temporary\"}",
+ GenerateJWKSet(keys, MediaKeys::TEMPORARY_SESSION));
+ keys.push_back(
+ MakeKeyIdAndKeyPair(data2, arraysize(data2), data2, arraysize(data2)));
+ EXPECT_EQ(
+ "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"},{\"k\":"
+ "\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}],\"type\":\"persistent-"
+ "license\"}",
+ GenerateJWKSet(keys, MediaKeys::PERSISTENT_LICENSE_SESSION));
+ keys.push_back(
+ MakeKeyIdAndKeyPair(data3, arraysize(data3), data3, arraysize(data3)));
+ EXPECT_EQ(
+ "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"},{\"k\":"
+ "\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"},{\"k\":"
+ "\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kid\":\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kty\":"
+ "\"oct\"}],\"type\":\"persistent-release-message\"}",
+ GenerateJWKSet(keys, MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION));
}
-TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
- // Try a simple JWK key (i.e. not in a set)
- const std::string kJwkSimple =
+TEST_F(JSONWebKeyTest, ExtractValidJWKKeys) {
+ // Try an empty 'keys' dictionary.
+ ExtractJWKKeysAndExpect("{ \"keys\": [] }", true, 0);
+
+ // Try a key list with one entry.
+ const std::string kJwksOneEntry =
"{"
- " \"kty\": \"oct\","
- " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
- " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ " }"
+ " ]"
"}";
- ExtractJWKKeysAndExpect(kJwkSimple, false, 0);
+ ExtractJWKKeysAndExpect(kJwksOneEntry, true, 1);
// Try a key list with multiple entries.
const std::string kJwksMultipleEntries =
@@ -108,7 +154,7 @@ TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
" {"
" \"kty\": \"oct\","
" \"kid\": \"JCUmJygpKissLS4vMA\","
- " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4_QA\""
" }"
" ]"
"}";
@@ -116,18 +162,47 @@ TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
// Try a key with no spaces and some \n plus additional fields.
const std::string kJwksNoSpaces =
- "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\","
"\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
"\",\"foo\":\"bar\"}]}\n\n";
ExtractJWKKeysAndExpect(kJwksNoSpaces, true, 1);
+ // Try a list with multiple keys with the same kid.
+ const std::string kJwksDuplicateKids =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ " },"
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4_QA\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksDuplicateKids, true, 2);
+}
+
+TEST_F(JSONWebKeyTest, ExtractInvalidJWKKeys) {
+ // Try a simple JWK key (i.e. not in a set)
+ const std::string kJwkSimple =
+ "{"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ "}";
+ ExtractJWKKeysAndExpect(kJwkSimple, false, 0);
+
// Try some non-ASCII characters.
ExtractJWKKeysAndExpect(
"This is not ASCII due to \xff\xfe\xfd in it.", false, 0);
// Try some non-ASCII characters in an otherwise valid JWK.
const std::string kJwksInvalidCharacters =
- "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\","
"\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"\xff\xfe\xfd"
"\",\"foo\":\"bar\"}]}\n\n";
ExtractJWKKeysAndExpect(kJwksInvalidCharacters, false, 0);
@@ -143,9 +218,6 @@ TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
// Try an empty dictionary.
ExtractJWKKeysAndExpect("{ }", false, 0);
- // Try an empty 'keys' dictionary.
- ExtractJWKKeysAndExpect("{ \"keys\": [] }", true, 0);
-
// Try with 'keys' not a dictionary.
ExtractJWKKeysAndExpect("{ \"keys\":\"1\" }", false, 0);
@@ -203,50 +275,144 @@ TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
" ]"
"}";
ExtractJWKKeysAndExpect(kJwksWithEmptyKeyId, false, 0);
+}
- // Try a list with multiple keys with the same kid.
- const std::string kJwksDuplicateKids =
+TEST_F(JSONWebKeyTest, KeyType) {
+ // Valid key type.
+ const std::string kJwksWithValidKeyType =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"JCUmJygpKissLS4vMA\","
- " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
- " },"
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithValidKeyType, true, 1);
+
+ // Empty key type.
+ const std::string kJwksWithEmptyKeyType =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithEmptyKeyType, false, 0);
+
+ // Key type is case sensitive.
+ const std::string kJwksWithUppercaseKeyType =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"OCT\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithUppercaseKeyType, false, 0);
+
+ // Wrong key type.
+ const std::string kJwksWithWrongKeyType =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"RSA\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithWrongKeyType, false, 0);
+}
+
+TEST_F(JSONWebKeyTest, Alg) {
+ // 'alg' is ignored, so verify that anything is allowed.
+ // Valid alg.
+ const std::string kJwksWithValidAlg =
+ "{"
+ " \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"JCUmJygpKissLS4vMA\","
- " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
+ " \"alg\": \"A128KW\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- ExtractJWKKeysAndExpect(kJwksDuplicateKids, true, 2);
+ ExtractJWKKeysAndExpect(kJwksWithValidAlg, true, 1);
+
+ // Empty alg.
+ const std::string kJwksWithEmptyAlg =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"alg\": \"\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithEmptyAlg, true, 1);
+
+ // Alg is case sensitive.
+ const std::string kJwksWithLowercaseAlg =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"alg\": \"a128kw\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithLowercaseAlg, true, 1);
+
+ // Wrong alg.
+ const std::string kJwksWithWrongAlg =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"alg\": \"RS256\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithWrongAlg, true, 1);
}
TEST_F(JSONWebKeyTest, SessionType) {
ExtractSessionTypeAndExpect(
- "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}",
- true,
+ "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}", true,
MediaKeys::TEMPORARY_SESSION);
ExtractSessionTypeAndExpect(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
"\"temporary\"}",
- true,
- MediaKeys::TEMPORARY_SESSION);
+ true, MediaKeys::TEMPORARY_SESSION);
ExtractSessionTypeAndExpect(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
- "\"persistent\"}",
- true,
- MediaKeys::PERSISTENT_SESSION);
+ "\"persistent-license\"}",
+ true, MediaKeys::PERSISTENT_LICENSE_SESSION);
+ ExtractSessionTypeAndExpect(
+ "{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
+ "\"persistent-release-message\"}",
+ true, MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION);
ExtractSessionTypeAndExpect(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
"\"unknown\"}",
- false,
- MediaKeys::TEMPORARY_SESSION);
+ false, MediaKeys::TEMPORARY_SESSION);
ExtractSessionTypeAndExpect(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":3}",
- false,
- MediaKeys::TEMPORARY_SESSION);
+ false, MediaKeys::TEMPORARY_SESSION);
}
TEST_F(JSONWebKeyTest, CreateLicense) {
@@ -259,19 +425,20 @@ TEST_F(JSONWebKeyTest, CreateLicense) {
arraysize(data1),
MediaKeys::TEMPORARY_SESSION,
"{\"kids\":[\"AQI\"],\"type\":\"temporary\"}");
- CreateLicenseAndExpect(data1,
- arraysize(data1),
- MediaKeys::PERSISTENT_SESSION,
- "{\"kids\":[\"AQI\"],\"type\":\"persistent\"}");
+ CreateLicenseAndExpect(
+ data1, arraysize(data1), MediaKeys::PERSISTENT_LICENSE_SESSION,
+ "{\"kids\":[\"AQI\"],\"type\":\"persistent-license\"}");
+ CreateLicenseAndExpect(
+ data1, arraysize(data1), MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION,
+ "{\"kids\":[\"AQI\"],\"type\":\"persistent-release-message\"}");
CreateLicenseAndExpect(data2,
arraysize(data2),
MediaKeys::TEMPORARY_SESSION,
"{\"kids\":[\"AQIDBA\"],\"type\":\"temporary\"}");
- CreateLicenseAndExpect(
- data3,
- arraysize(data3),
- MediaKeys::PERSISTENT_SESSION,
- "{\"kids\":[\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":\"persistent\"}");
+ CreateLicenseAndExpect(data3, arraysize(data3),
+ MediaKeys::PERSISTENT_LICENSE_SESSION,
+ "{\"kids\":[\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":"
+ "\"persistent-license\"}");
}
TEST_F(JSONWebKeyTest, ExtractLicense) {
@@ -317,5 +484,157 @@ TEST_F(JSONWebKeyTest, ExtractLicense) {
ExtractKeyFromLicenseAndExpect("{\"kids\":[\"!@#$%^&*()\"]}", false, NULL, 0);
}
+TEST_F(JSONWebKeyTest, Base64UrlEncoding) {
+ const uint8 data1[] = { 0xfb, 0xfd, 0xfb, 0xfd, 0xfb, 0xfd, 0xfb };
+
+ // Verify that |data1| contains invalid base64url characters '+' and '/'
+ // and is padded with = when converted to base64.
+ std::string encoded_text;
+ base::Base64Encode(
+ std::string(reinterpret_cast<const char*>(&data1[0]), arraysize(data1)),
+ &encoded_text);
+ EXPECT_EQ(encoded_text, "+/37/fv9+w==");
+ EXPECT_NE(encoded_text.find('+'), std::string::npos);
+ EXPECT_NE(encoded_text.find('/'), std::string::npos);
+ EXPECT_NE(encoded_text.find('='), std::string::npos);
+
+ // base64url characters '-' and '_' not in base64 encoding.
+ EXPECT_EQ(encoded_text.find('-'), std::string::npos);
+ EXPECT_EQ(encoded_text.find('_'), std::string::npos);
+
+ CreateLicenseAndExpect(data1, arraysize(data1), MediaKeys::TEMPORARY_SESSION,
+ "{\"kids\":[\"-_37_fv9-w\"],\"type\":\"temporary\"}");
+
+ ExtractKeyFromLicenseAndExpect(
+ "{\"kids\":[\"-_37_fv9-w\"],\"type\":\"temporary\"}", true, data1,
+ arraysize(data1));
+}
+
+TEST_F(JSONWebKeyTest, MultipleKeys) {
+ const uint8 data1[] = { 0x01, 0x02 };
+ const uint8 data2[] = { 0x01, 0x02, 0x03, 0x04 };
+ const uint8 data3[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
+
+ std::vector<uint8> result;
+ KeyIdList key_ids;
+ key_ids.push_back(std::vector<uint8>(data1, data1 + arraysize(data1)));
+ key_ids.push_back(std::vector<uint8>(data2, data2 + arraysize(data2)));
+ key_ids.push_back(std::vector<uint8>(data3, data3 + arraysize(data3)));
+ CreateLicenseRequest(key_ids, MediaKeys::TEMPORARY_SESSION, &result);
+ std::string s(result.begin(), result.end());
+ EXPECT_EQ(
+ "{\"kids\":[\"AQI\",\"AQIDBA\",\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":"
+ "\"temporary\"}",
+ s);
+}
+
+TEST_F(JSONWebKeyTest, ExtractKeyIds) {
+ const uint8 data1[] = { 0x01, 0x02 };
+ const uint8 data2[] = { 0x01, 0x02, 0x03, 0x04 };
+ const uint8 data3[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
+
+ KeyIdList key_ids;
+ std::string error_message;
+
+ EXPECT_TRUE(ExtractKeyIdsFromKeyIdsInitData("{\"kids\":[\"AQI\"]}", &key_ids,
+ &error_message));
+ EXPECT_EQ(1u, key_ids.size());
+ EXPECT_EQ(0u, error_message.length());
+ VerifyKeyId(key_ids[0], data1, arraysize(data1));
+
+ EXPECT_TRUE(ExtractKeyIdsFromKeyIdsInitData(
+ "{\"kids\":[\"AQI\",\"AQIDBA\",\"AQIDBAUGBwgJCgsMDQ4PEA\"]}", &key_ids,
+ &error_message));
+ EXPECT_EQ(3u, key_ids.size());
+ EXPECT_EQ(0u, error_message.length());
+ VerifyKeyId(key_ids[0], data1, arraysize(data1));
+ VerifyKeyId(key_ids[1], data2, arraysize(data2));
+ VerifyKeyId(key_ids[2], data3, arraysize(data3));
+
+ // Expect failure when non-ascii.
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData(
+ "This is not ASCII due to \xff\xfe\x0a in it.", &key_ids,
+ &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message,
+ "Non ASCII: This is not ASCII due to \\u00FF\\u00FE\\n in it.");
+
+ // Expect failure when not JSON or not a dictionary.
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("This is invalid.", &key_ids,
+ &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message, "Not valid JSON: This is invalid.");
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("6", &key_ids, &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message, "Not valid JSON: 6");
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData(
+ "This is a very long string that is longer than 64 characters and is "
+ "invalid.",
+ &key_ids, &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message,
+ "Not valid JSON: This is a very long string that is longer than 64 "
+ "characters ...");
+
+ // Expect failure when "kids" not specified.
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("{\"keys\":[\"AQI\"]}", &key_ids,
+ &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message, "Missing 'kids' parameter or not a list");
+
+ // Expect failure when invalid key_ids specified.
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("{\"kids\":[1]}", &key_ids,
+ &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message, "'kids'[0] is not string.");
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("{\"kids\": {\"id\":\"AQI\" }}",
+ &key_ids, &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message, "Missing 'kids' parameter or not a list");
+
+ // Expect failure when non-base64 key_ids specified.
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("{\"kids\":[\"AQI+\"]}",
+ &key_ids, &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message,
+ "'kids'[0] is not valid base64url encoded. Value: AQI+");
+ EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData("{\"kids\":[\"AQI\",\"AQI/\"]}",
+ &key_ids, &error_message));
+ EXPECT_EQ(3u, key_ids.size()); // |key_ids| should be unchanged.
+ EXPECT_EQ(error_message,
+ "'kids'[1] is not valid base64url encoded. Value: AQI/");
+}
+
+TEST_F(JSONWebKeyTest, CreateInitData) {
+ const uint8 data1[] = { 0x01, 0x02 };
+ const uint8 data2[] = { 0x01, 0x02, 0x03, 0x04 };
+ const uint8 data3[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
+
+ KeyIdList key_ids;
+ std::string error_message;
+
+ key_ids.push_back(std::vector<uint8>(data1, data1 + arraysize(data1)));
+ std::vector<uint8> init_data1;
+ CreateKeyIdsInitData(key_ids, &init_data1);
+ std::string result1(init_data1.begin(), init_data1.end());
+ EXPECT_EQ(result1, "{\"kids\":[\"AQI\"]}");
+
+ key_ids.push_back(std::vector<uint8>(data2, data2 + arraysize(data2)));
+ std::vector<uint8> init_data2;
+ CreateKeyIdsInitData(key_ids, &init_data2);
+ std::string result2(init_data2.begin(), init_data2.end());
+ EXPECT_EQ(result2, "{\"kids\":[\"AQI\",\"AQIDBA\"]}");
+
+ key_ids.push_back(std::vector<uint8>(data3, data3 + arraysize(data3)));
+ std::vector<uint8> init_data3;
+ CreateKeyIdsInitData(key_ids, &init_data3);
+ std::string result3(init_data3.begin(), init_data3.end());
+ EXPECT_EQ(result3,
+ "{\"kids\":[\"AQI\",\"AQIDBA\",\"AQIDBAUGBwgJCgsMDQ4PEA\"]}");
+}
+
} // namespace media
diff --git a/chromium/media/cdm/key_system_names.cc b/chromium/media/cdm/key_system_names.cc
index 32b74754d69..1782203ae5f 100644
--- a/chromium/media/cdm/key_system_names.cc
+++ b/chromium/media/cdm/key_system_names.cc
@@ -4,15 +4,18 @@
#include "media/cdm/key_system_names.h"
-#include <string>
namespace media {
const char kClearKey[] = "org.w3.clearkey";
const char kExternalClearKey[] = "org.chromium.externalclearkey";
-static bool IsParentKeySystemOf(const std::string& parent_key_system,
- const std::string& key_system) {
+bool IsClearKey(const std::string& key_system) {
+ return key_system == kClearKey;
+}
+
+bool IsParentKeySystemOf(const std::string& parent_key_system,
+ const std::string& key_system) {
std::string prefix = parent_key_system + '.';
return key_system.substr(0, prefix.size()) == prefix;
}
diff --git a/chromium/media/cdm/key_system_names.h b/chromium/media/cdm/key_system_names.h
index c1818490bb0..6c388b98c84 100644
--- a/chromium/media/cdm/key_system_names.h
+++ b/chromium/media/cdm/key_system_names.h
@@ -20,9 +20,11 @@ MEDIA_EXPORT extern const char kClearKey[];
MEDIA_EXPORT extern const char kExternalClearKey[];
// Returns true if |key_system| is Clear Key, false otherwise.
-MEDIA_EXPORT inline bool IsClearKey(const std::string& key_system) {
- return key_system == kClearKey;
-}
+MEDIA_EXPORT bool IsClearKey(const std::string& key_system);
+
+// Returns true if |key_system| is (reverse) sub-domain of |parent_key_system|.
+MEDIA_EXPORT bool IsParentKeySystemOf(const std::string& parent_key_system,
+ const std::string& key_system);
// Returns true if |key_system| is External Clear Key, false otherwise.
MEDIA_EXPORT bool IsExternalClearKey(const std::string& key_system);
diff --git a/chromium/media/cdm/ppapi/BUILD.gn b/chromium/media/cdm/ppapi/BUILD.gn
new file mode 100644
index 00000000000..585e6bf00a2
--- /dev/null
+++ b/chromium/media/cdm/ppapi/BUILD.gn
@@ -0,0 +1,76 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/features.gni")
+import("//chrome/version.gni") # TODO layering violation!
+import("//media/cdm/ppapi/cdm_adapter.gni")
+
+# Android doesn't use ffmpeg.
+use_ffmpeg = !is_android
+
+# The GYP version supports build flags "use_fake_video_decoder" and
+# "use_vpx". These should be added here if necessary but its not clear if
+# they are required any more.
+shared_library("clearkeycdm") {
+ sources = [
+ "cdm_file_io_test.cc",
+ "cdm_file_io_test.h",
+ "external_clear_key/cdm_video_decoder.cc",
+ "external_clear_key/cdm_video_decoder.h",
+ "external_clear_key/clear_key_cdm.cc",
+ "external_clear_key/clear_key_cdm.h",
+ "external_clear_key/clear_key_cdm_common.h",
+ ]
+
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ defines = [ "CDM_IMPLEMENTATION" ]
+
+ deps = [
+ "//base",
+ "//media", # For media::AudioTimestampHelper
+ "//media:shared_memory_support", # For media::AudioBus.
+ "//url",
+ ]
+
+ if (use_ffmpeg) {
+ sources += [
+ "external_clear_key/ffmpeg_cdm_audio_decoder.cc",
+ "external_clear_key/ffmpeg_cdm_audio_decoder.h",
+ "external_clear_key/ffmpeg_cdm_video_decoder.cc",
+ "external_clear_key/ffmpeg_cdm_video_decoder.h",
+ ]
+ defines += [ "CLEAR_KEY_CDM_USE_FFMPEG_DECODER" ]
+ deps += [ "//third_party/ffmpeg" ]
+ }
+
+ # TODO(GYP) on Mac: 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
+}
+
+process_version("clearkeycdmadapter_resources") {
+ visibility = [ ":*" ]
+ sources = [
+ "//media/clearkeycdmadapter.ver",
+ "external_clear_key/BRANDING",
+ ]
+ output = "$target_gen_dir/clearkeycdmadapter_version.rc"
+}
+
+cdm_adapter("clearkeycdmadapter") {
+ # Check whether the plugin's origin URL is valid.
+ defines = [ "CHECK_DOCUMENT_URL" ]
+ deps = [
+ ":clearkeycdm",
+ ":clearkeycdmadapter_resources",
+ "//ppapi/cpp",
+ ]
+
+ if (is_posix && !is_mac && enable_pepper_cdms) {
+ # Because clearkeycdm has type 'loadable_module', we must explicitly
+ # specify this dependency.
+ ldflags = [ rebase_path("$root_out_dir/libclearkeycdm.so", root_build_dir) ]
+ libs = [ "rt" ]
+ }
+}
diff --git a/chromium/media/cdm/ppapi/api/content_decryption_module.h b/chromium/media/cdm/ppapi/api/content_decryption_module.h
index 9aaa4ee361d..1e5e6f6ea95 100644
--- a/chromium/media/cdm/ppapi/api/content_decryption_module.h
+++ b/chromium/media/cdm/ppapi/api/content_decryption_module.h
@@ -76,12 +76,12 @@ CDM_EXPORT const char* GetCdmVersion();
namespace cdm {
class AudioFrames;
-
-class Host_6;
-
class DecryptedBlock;
class VideoFrame;
+class Host_7;
+class Host_8;
+
enum Status {
kSuccess = 0,
kNeedMoreData, // Decoder needs more data to produce a decoded frame/sample.
@@ -92,17 +92,8 @@ enum Status {
kDeferredInitialization // Decoder is not ready for initialization.
};
-// This must be consistent with MediaKeyError defined in the spec:
-// https://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#error-codes
-// Support the minimum required set with backwards compatible values.
-enum MediaKeyError {
- kPrefixedUnknownError = 1,
- kPrefixedClientError = 2,
- kPrefixedOutputError = 4
-};
-
// This must at least contain the exceptions defined in the spec:
-// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#exceptions
+// https://w3c.github.io/encrypted-media/#exceptions
// The following starts with the list of DOM4 exceptions from:
// http://www.w3.org/TR/dom/#domexception
// Some DOM4 exceptions are not included as they are not expected to be used.
@@ -147,7 +138,6 @@ typedef double Time;
// |<----- subsample1 ----->|<----- subsample2 ----->|<----- subsample3 ----->|
// | clear1 | decrypted1| clear2 | decrypted2 | clear3 | decrypted3 |
//
-// TODO(xhwang): Add checks to make sure these structs have fixed layout.
struct SubsampleEntry {
SubsampleEntry(uint32_t clear_bytes, uint32_t cipher_bytes)
: clear_bytes(clear_bytes), cipher_bytes(cipher_bytes) {}
@@ -309,6 +299,29 @@ struct BinaryData {
uint32_t length;
};
+// The current status of the associated key. The valid types are defined in the
+// spec: https://w3c.github.io/encrypted-media/#idl-def-MediaKeyStatus
+enum KeyStatus {
+ kUsable = 0,
+ kInternalError = 1,
+ kExpired = 2,
+ kOutputNotAllowed = 3,
+ kOutputDownscaled = 4,
+ kStatusPending = 5
+};
+
+// Used when passing arrays of key information. Does not own the referenced
+// data. |system_code| is an additional error code for unusable keys and
+// should be 0 when |status| == kUsable.
+struct KeyInformation {
+ KeyInformation()
+ : key_id(NULL), key_id_size(0), status(kInternalError), system_code(0) {}
+ const uint8_t* key_id;
+ uint32_t key_id_size;
+ KeyStatus status;
+ uint32_t system_code;
+};
+
// Supported output protection methods for use with EnableOutputProtection() and
// returned by OnQueryOutputProtectionStatus().
enum OutputProtectionMethods {
@@ -328,11 +341,34 @@ enum OutputLinkTypes {
kLinkTypeNetwork = 1 << 6
};
+// Result of the QueryOutputProtectionStatus() call.
+enum QueryResult {
+ kQuerySucceeded = 0,
+ kQueryFailed
+};
+
+// The Initialization Data Type. The valid types are defined in the spec:
+// http://w3c.github.io/encrypted-media/initdata-format-registry.html#registry
+enum InitDataType {
+ kCenc = 0,
+ kKeyIds = 1,
+ kWebM = 2
+};
+
// The type of session to create. The valid types are defined in the spec:
-// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#dom-sessiontype
+// https://w3c.github.io/encrypted-media/#idl-def-SessionType
enum SessionType {
kTemporary = 0,
- kPersistent = 1
+ kPersistentLicense = 1,
+ kPersistentKeyRelease = 2
+};
+
+// The type of the message event. The valid types are defined in the spec:
+// https://w3c.github.io/encrypted-media/#idl-def-MediaKeyMessageType
+enum MessageType {
+ kLicenseRequest = 0,
+ kLicenseRenewal = 1,
+ kLicenseRelease = 2
};
// FileIO interface provides a way for the CDM to store data in a file in
@@ -425,69 +461,253 @@ class FileIOClient {
// provided in CreateCdmInstance() to allocate any Buffer that needs to
// be passed back to the caller. Implementations must call Buffer::Destroy()
// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_6 {
+class ContentDecryptionModule_7 {
public:
- static const int kVersion = 6;
- typedef Host_6 Host;
+ static const int kVersion = 7;
+ typedef Host_7 Host;
- // CreateSession(), LoadSession(), UpdateSession(), and ReleaseSession()
- // accept a |promise_id|, which must be passed to the completion Host method
+ // SetServerCertificate(), CreateSessionAndGenerateRequest(), LoadSession(),
+ // UpdateSession(), CloseSession(), and RemoveSession() all accept a
+ // |promise_id|, which must be passed to the completion Host method
// (e.g. Host::OnResolveNewSessionPromise()).
- // Creates a session given |init_data_type|, |init_data| and |session_type|.
+ // Provides a server certificate to be used to encrypt messages to the
+ // license server. The CDM must respond by calling either
+ // Host::OnResolvePromise() or Host::OnRejectPromise().
+ virtual void SetServerCertificate(uint32_t promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) = 0;
+
+ // Creates a session given |session_type|, |init_data_type|, and |init_data|.
// The CDM must respond by calling either Host::OnResolveNewSessionPromise()
// or Host::OnRejectPromise().
- virtual void CreateSession(
- uint32_t promise_id,
- const char* init_data_type, uint32_t init_data_type_size,
- const uint8_t* init_data, uint32_t init_data_size,
- SessionType session_type) = 0;
-
- // Loads the session with |web_session_id|. The CDM must respond by calling
- // either Host::OnResolveNewSessionPromise() or Host::OnRejectPromise().
- // If the session is not found, call Host::OnResolveNewSessionPromise()
- // with web_session_id = NULL.
- virtual void LoadSession(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+ virtual void CreateSessionAndGenerateRequest(uint32_t promise_id,
+ SessionType session_type,
+ const char* init_data_type,
+ uint32_t init_data_type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) = 0;
+
+ // Loads the session of type |session_type| specified by |session_id|.
+ // The CDM must respond by calling either Host::OnResolveNewSessionPromise()
+ // or Host::OnRejectPromise(). If the session is not found, call
+ // Host::OnResolveNewSessionPromise() with session_id = NULL.
+ virtual void LoadSession(uint32_t promise_id,
+ SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
// Updates the session with |response|. The CDM must respond by calling
// either Host::OnResolvePromise() or Host::OnRejectPromise().
- virtual void UpdateSession(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length,
- const uint8_t* response, uint32_t response_size) = 0;
+ virtual void UpdateSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size,
+ const uint8_t* response,
+ uint32_t response_size) = 0;
// Requests that the CDM close the session. The CDM must respond by calling
// either Host::OnResolvePromise() or Host::OnRejectPromise() when the request
// has been processed. This may be before the session is closed. Once the
// session is closed, Host::OnSessionClosed() must also be called.
- virtual void CloseSession(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+ virtual void CloseSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
// Removes any stored session data associated with this session. Will only be
// called for persistent sessions. The CDM must respond by calling either
// Host::OnResolvePromise() or Host::OnRejectPromise() when the request has
// been processed.
- virtual void RemoveSession(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+ virtual void RemoveSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
+
+ // Performs scheduled operation with |context| when the timer fires.
+ virtual void TimerExpired(void* context) = 0;
+
+ // Decrypts the |encrypted_buffer|.
+ //
+ // Returns kSuccess if decryption succeeded, in which case the callee
+ // should have filled the |decrypted_buffer| and passed the ownership of
+ // |data| in |decrypted_buffer| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kDecryptError if any other error happened.
+ // If the return value is not kSuccess, |decrypted_buffer| should be ignored
+ // by the caller.
+ virtual Status Decrypt(const InputBuffer& encrypted_buffer,
+ DecryptedBlock* decrypted_buffer) = 0;
+
+ // Initializes the CDM audio decoder with |audio_decoder_config|. This
+ // function must be called before DecryptAndDecodeSamples() is called.
+ //
+ // Returns kSuccess if the |audio_decoder_config| is supported and the CDM
+ // audio decoder is successfully initialized.
+ // Returns kSessionError if |audio_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeAudioDecoder(
+ const AudioDecoderConfig& audio_decoder_config) = 0;
+
+ // Initializes the CDM video decoder with |video_decoder_config|. This
+ // function must be called before DecryptAndDecodeFrame() is called.
+ //
+ // Returns kSuccess if the |video_decoder_config| is supported and the CDM
+ // video decoder is successfully initialized.
+ // Returns kSessionError if |video_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeVideoDecoder(
+ const VideoDecoderConfig& video_decoder_config) = 0;
+
+ // De-initializes the CDM decoder and sets it to an uninitialized state. The
+ // caller can initialize the decoder again after this call to re-initialize
+ // it. This can be used to reconfigure the decoder if the configuration
+ // changes.
+ virtual void DeinitializeDecoder(StreamType decoder_type) = 0;
+
+ // Resets the CDM decoder to an initialized clean state. All internal buffers
+ // MUST be flushed.
+ virtual void ResetDecoder(StreamType decoder_type) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into a
+ // |video_frame|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |video_frame| (|format| == kEmptyVideoFrame) is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled the |video_frame| and passed the ownership of
+ // |frame_buffer| in |video_frame| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // a decoded frame (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |video_frame| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeFrame(const InputBuffer& encrypted_buffer,
+ VideoFrame* video_frame) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into
+ // |audio_frames|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |audio_frames| is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled |audio_frames| and passed the ownership of
+ // |data| in |audio_frames| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // audio samples (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |audio_frames| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeSamples(const InputBuffer& encrypted_buffer,
+ AudioFrames* audio_frames) = 0;
+
+ // Called by the host after a platform challenge was initiated via
+ // Host::SendPlatformChallenge().
+ virtual void OnPlatformChallengeResponse(
+ const PlatformChallengeResponse& response) = 0;
+
+ // Called by the host after a call to Host::QueryOutputProtectionStatus(). The
+ // |link_mask| is a bit mask of OutputLinkTypes and |output_protection_mask|
+ // is a bit mask of OutputProtectionMethods. If |result| is kQueryFailed,
+ // then |link_mask| and |output_protection_mask| are undefined and should
+ // be ignored.
+ virtual void OnQueryOutputProtectionStatus(
+ QueryResult result,
+ uint32_t link_mask,
+ uint32_t output_protection_mask) = 0;
+
+ // Destroys the object in the same context as it was created.
+ virtual void Destroy() = 0;
+
+ protected:
+ ContentDecryptionModule_7() {}
+ virtual ~ContentDecryptionModule_7() {}
+};
- // Requests the key IDs for keys in the session that the CDM knows are
- // currently usable to decrypt media data. The CDM must respond by calling
- // either Host::OnResolveKeyIdsPromise() or Host::OnRejectPromise().
- virtual void GetUsableKeyIds(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+// ContentDecryptionModule interface that all CDMs need to implement.
+// The interface is versioned for backward compatibility.
+// Note: ContentDecryptionModule implementations must use the allocator
+// provided in CreateCdmInstance() to allocate any Buffer that needs to
+// be passed back to the caller. Implementations must call Buffer::Destroy()
+// when a Buffer is created that will never be returned to the caller.
+class ContentDecryptionModule_8 {
+ public:
+ static const int kVersion = 8;
+ typedef Host_8 Host;
+
+ // Initializes the CDM instance, providing information about permitted
+ // functionalities.
+ // If |allow_distinctive_identifier| is false, messages from the CDM,
+ // such as message events, must not contain a Distinctive Identifier,
+ // even in an encrypted form.
+ // If |allow_persistent_state| is false, the CDM must not attempt to
+ // persist state. Calls to CreateFileIO() will fail.
+ virtual void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) = 0;
+
+ // SetServerCertificate(), CreateSessionAndGenerateRequest(), LoadSession(),
+ // UpdateSession(), CloseSession(), and RemoveSession() all accept a
+ // |promise_id|, which must be passed to the completion Host method
+ // (e.g. Host::OnResolveNewSessionPromise()).
// Provides a server certificate to be used to encrypt messages to the
// license server. The CDM must respond by calling either
// Host::OnResolvePromise() or Host::OnRejectPromise().
- virtual void SetServerCertificate(
- uint32_t promise_id,
- const uint8_t* server_certificate_data,
- uint32_t server_certificate_data_size) = 0;
+ virtual void SetServerCertificate(uint32_t promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) = 0;
+
+ // Creates a session given |session_type|, |init_data_type|, and |init_data|.
+ // The CDM must respond by calling either Host::OnResolveNewSessionPromise()
+ // or Host::OnRejectPromise().
+ virtual void CreateSessionAndGenerateRequest(uint32_t promise_id,
+ SessionType session_type,
+ InitDataType init_data_type,
+ const uint8_t* init_data,
+ uint32_t init_data_size) = 0;
+
+ // Loads the session of type |session_type| specified by |session_id|.
+ // The CDM must respond by calling either Host::OnResolveNewSessionPromise()
+ // or Host::OnRejectPromise(). If the session is not found, call
+ // Host::OnResolveNewSessionPromise() with session_id = NULL.
+ virtual void LoadSession(uint32_t promise_id,
+ SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
+
+ // Updates the session with |response|. The CDM must respond by calling
+ // either Host::OnResolvePromise() or Host::OnRejectPromise().
+ virtual void UpdateSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size,
+ const uint8_t* response,
+ uint32_t response_size) = 0;
+
+ // Requests that the CDM close the session. The CDM must respond by calling
+ // either Host::OnResolvePromise() or Host::OnRejectPromise() when the request
+ // has been processed. This may be before the session is closed. Once the
+ // session is closed, Host::OnSessionClosed() must also be called.
+ virtual void CloseSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
+
+ // Removes any stored session data associated with this session. Will only be
+ // called for persistent sessions. The CDM must respond by calling either
+ // Host::OnResolvePromise() or Host::OnRejectPromise() when the request has
+ // been processed.
+ virtual void RemoveSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
// Performs scheduled operation with |context| when the timer fires.
virtual void TimerExpired(void* context) = 0;
@@ -586,19 +806,23 @@ class ContentDecryptionModule_6 {
// Called by the host after a call to Host::QueryOutputProtectionStatus(). The
// |link_mask| is a bit mask of OutputLinkTypes and |output_protection_mask|
- // is a bit mask of OutputProtectionMethods.
+ // is a bit mask of OutputProtectionMethods. If |result| is kQueryFailed,
+ // then |link_mask| and |output_protection_mask| are undefined and should
+ // be ignored.
virtual void OnQueryOutputProtectionStatus(
- uint32_t link_mask, uint32_t output_protection_mask) = 0;
+ QueryResult result,
+ uint32_t link_mask,
+ uint32_t output_protection_mask) = 0;
// Destroys the object in the same context as it was created.
virtual void Destroy() = 0;
protected:
- ContentDecryptionModule_6() {}
- virtual ~ContentDecryptionModule_6() {}
+ ContentDecryptionModule_8() {}
+ virtual ~ContentDecryptionModule_8() {}
};
-typedef ContentDecryptionModule_6 ContentDecryptionModule;
+typedef ContentDecryptionModule_8 ContentDecryptionModule;
// Represents a buffer created by Allocator implementations.
class Buffer {
@@ -620,9 +844,9 @@ class Buffer {
void operator=(const Buffer&);
};
-class Host_6 {
+class Host_7 {
public:
- static const int kVersion = 6;
+ static const int kVersion = 7;
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
@@ -638,74 +862,219 @@ class Host_6 {
virtual Time GetCurrentWallTime() = 0;
// Called by the CDM when a session is created or loaded and the value for the
- // MediaKeySession's sessionId attribute is available (|web_session_id|).
- // This must be called before OnSessionMessage() or OnSessionReady() is called
- // for the same session. |web_session_id_length| should not include null
- // termination.
- // When called in response to LoadSession(), the |web_session_id| must be the
- // same as the |web_session_id| passed in LoadSession(), or NULL if the
+ // MediaKeySession's sessionId attribute is available (|session_id|).
+ // This must be called before OnSessionMessage() or
+ // OnSessionKeysChange() is called for the same session. |session_id_size|
+ // should not include null termination.
+ // When called in response to LoadSession(), the |session_id| must be the
+ // same as the |session_id| passed in LoadSession(), or NULL if the
// session could not be loaded.
- virtual void OnResolveNewSessionPromise(
- uint32_t promise_id,
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+ virtual void OnResolveNewSessionPromise(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
// Called by the CDM when a session is updated or released.
virtual void OnResolvePromise(uint32_t promise_id) = 0;
- // Called by the CDM to return a list of key IDs. The caller owns the
- // BinaryData array and the values the elements reference.
- virtual void OnResolveKeyIdsPromise(
- uint32_t promise_id,
- const BinaryData* key_ids, uint32_t key_ids_length) = 0;
-
// Called by the CDM when an error occurs as a result of one of the
// ContentDecryptionModule calls that accept a |promise_id|.
// |error| must be specified, |error_message| and |system_code|
- // are optional. Length parameters should not include null termination.
- virtual void OnRejectPromise(
- uint32_t promise_id,
+ // are optional. |error_message_size| should not include null termination.
+ virtual void OnRejectPromise(uint32_t promise_id,
+ Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_size) = 0;
+
+ // Called by the CDM when it has a message for session |session_id|.
+ // Size parameters should not include null termination.
+ // |legacy_destination_url| is only for supporting the prefixed EME API and
+ // is ignored by unprefixed EME. It should only be non-null if |message_type|
+ // is kLicenseRenewal.
+ virtual void OnSessionMessage(const char* session_id,
+ uint32_t session_id_size,
+ MessageType message_type,
+ const char* message,
+ uint32_t message_size,
+ const char* legacy_destination_url,
+ uint32_t legacy_destination_url_length) = 0;
+
+ // Called by the CDM when there has been a change in keys or their status for
+ // session |session_id|. |has_additional_usable_key| should be set if a
+ // key is newly usable (e.g. new key available, previously expired key has
+ // been renewed, etc.) and the browser should attempt to resume playback.
+ // |key_ids| is the list of key ids for this session along with their
+ // current status. |key_ids_count| is the number of entries in |key_ids|.
+ // Size parameter for |session_id| should not include null termination.
+ virtual void OnSessionKeysChange(const char* session_id,
+ uint32_t session_id_size,
+ bool has_additional_usable_key,
+ const KeyInformation* keys_info,
+ uint32_t keys_info_count) = 0;
+
+ // Called by the CDM when there has been a change in the expiration time for
+ // session |session_id|. This can happen as the result of an Update() call
+ // or some other event. If this happens as a result of a call to Update(),
+ // it must be called before resolving the Update() promise. |new_expiry_time|
+ // can be 0 to represent "undefined". Size parameter should not include
+ // null termination.
+ virtual void OnExpirationChange(const char* session_id,
+ uint32_t session_id_size,
+ Time new_expiry_time) = 0;
+
+ // Called by the CDM when session |session_id| is closed. Size
+ // parameter should not include null termination.
+ virtual void OnSessionClosed(const char* session_id,
+ uint32_t session_id_size) = 0;
+
+ // Called by the CDM when an error occurs in session |session_id|
+ // unrelated to one of the ContentDecryptionModule calls that accept a
+ // |promise_id|. |error| must be specified, |error_message| and
+ // |system_code| are optional. Length parameters should not include null
+ // termination.
+ // Note:
+ // - This method is only for supporting prefixed EME API.
+ // - This method will be ignored by unprefixed EME. All errors reported
+ // in this method should probably also be reported by one of other methods.
+ virtual void OnLegacySessionError(
+ const char* session_id, uint32_t session_id_length,
Error error,
uint32_t system_code,
const char* error_message, uint32_t error_message_length) = 0;
- // Called by the CDM when it has a message for session |web_session_id|.
- // Length parameters should not include null termination.
- virtual void OnSessionMessage(
- const char* web_session_id, uint32_t web_session_id_length,
- const char* message, uint32_t message_length,
- const char* destination_url, uint32_t destination_url_length) = 0;
+ // The following are optional methods that may not be implemented on all
+ // platforms.
+
+ // Sends a platform challenge for the given |service_id|. |challenge| is at
+ // most 256 bits of data to be signed. Once the challenge has been completed,
+ // the host will call ContentDecryptionModule::OnPlatformChallengeResponse()
+ // with the signed challenge response and platform certificate. Size
+ // parameters should not include null termination.
+ virtual void SendPlatformChallenge(const char* service_id,
+ uint32_t service_id_size,
+ const char* challenge,
+ uint32_t challenge_size) = 0;
+
+ // Attempts to enable output protection (e.g. HDCP) on the display link. The
+ // |desired_protection_mask| is a bit mask of OutputProtectionMethods. No
+ // status callback is issued, the CDM must call QueryOutputProtectionStatus()
+ // periodically to ensure the desired protections are applied.
+ virtual void EnableOutputProtection(uint32_t desired_protection_mask) = 0;
+
+ // Requests the current output protection status. Once the host has the status
+ // it will call ContentDecryptionModule::OnQueryOutputProtectionStatus().
+ virtual void QueryOutputProtectionStatus() = 0;
+
+ // Must be called by the CDM if it returned kDeferredInitialization during
+ // InitializeAudioDecoder() or InitializeVideoDecoder().
+ virtual void OnDeferredInitializationDone(StreamType stream_type,
+ Status decoder_status) = 0;
+
+ // Creates a FileIO object from the host to do file IO operation. Returns NULL
+ // if a FileIO object cannot be obtained. Once a valid FileIO object is
+ // returned, |client| must be valid until FileIO::Close() is called. The
+ // CDM can call this method multiple times to operate on different files.
+ virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
- // Called by the CDM when there has been a change in usable keys for
- // session |web_session_id|. |has_additional_usable_key| should be set if a
+ protected:
+ Host_7() {}
+ virtual ~Host_7() {}
+};
+
+class Host_8 {
+ public:
+ static const int kVersion = 8;
+
+ // Returns a Buffer* containing non-zero members upon success, or NULL on
+ // failure. The caller owns the Buffer* after this call. The buffer is not
+ // guaranteed to be zero initialized. The capacity of the allocated Buffer
+ // is guaranteed to be not less than |capacity|.
+ virtual Buffer* Allocate(uint32_t capacity) = 0;
+
+ // Requests the host to call ContentDecryptionModule::TimerFired() |delay_ms|
+ // from now with |context|.
+ virtual void SetTimer(int64_t delay_ms, void* context) = 0;
+
+ // Returns the current wall time in seconds.
+ virtual Time GetCurrentWallTime() = 0;
+
+ // Called by the CDM when a session is created or loaded and the value for the
+ // MediaKeySession's sessionId attribute is available (|session_id|).
+ // This must be called before OnSessionMessage() or
+ // OnSessionKeysChange() is called for the same session. |session_id_size|
+ // should not include null termination.
+ // When called in response to LoadSession(), the |session_id| must be the
+ // same as the |session_id| passed in LoadSession(), or NULL if the
+ // session could not be loaded.
+ virtual void OnResolveNewSessionPromise(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
+
+ // Called by the CDM when a session is updated or released.
+ virtual void OnResolvePromise(uint32_t promise_id) = 0;
+
+ // Called by the CDM when an error occurs as a result of one of the
+ // ContentDecryptionModule calls that accept a |promise_id|.
+ // |error| must be specified, |error_message| and |system_code|
+ // are optional. |error_message_size| should not include null termination.
+ virtual void OnRejectPromise(uint32_t promise_id,
+ Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_size) = 0;
+
+ // Called by the CDM when it has a message for session |session_id|.
+ // Size parameters should not include null termination.
+ // |legacy_destination_url| is only for supporting the prefixed EME API and
+ // is ignored by unprefixed EME. It should only be non-null if |message_type|
+ // is kLicenseRenewal.
+ virtual void OnSessionMessage(const char* session_id,
+ uint32_t session_id_size,
+ MessageType message_type,
+ const char* message,
+ uint32_t message_size,
+ const char* legacy_destination_url,
+ uint32_t legacy_destination_url_length) = 0;
+
+ // Called by the CDM when there has been a change in keys or their status for
+ // session |session_id|. |has_additional_usable_key| should be set if a
// key is newly usable (e.g. new key available, previously expired key has
// been renewed, etc.) and the browser should attempt to resume playback.
- // Length parameter should not include null termination.
- virtual void OnSessionUsableKeysChange(
- const char* web_session_id, uint32_t web_session_id_length,
- bool has_additional_usable_key) = 0;
+ // |key_ids| is the list of key ids for this session along with their
+ // current status. |key_ids_count| is the number of entries in |key_ids|.
+ // Size parameter for |session_id| should not include null termination.
+ virtual void OnSessionKeysChange(const char* session_id,
+ uint32_t session_id_size,
+ bool has_additional_usable_key,
+ const KeyInformation* keys_info,
+ uint32_t keys_info_count) = 0;
// Called by the CDM when there has been a change in the expiration time for
- // session |web_session_id|. This can happen as the result of an Update() call
+ // session |session_id|. This can happen as the result of an Update() call
// or some other event. If this happens as a result of a call to Update(),
// it must be called before resolving the Update() promise. |new_expiry_time|
- // can be 0 to represent "undefined". Length parameter should not include
+ // can be 0 to represent "undefined". Size parameter should not include
// null termination.
- virtual void OnExpirationChange(
- const char* web_session_id, uint32_t web_session_id_length,
- Time new_expiry_time) = 0;
+ virtual void OnExpirationChange(const char* session_id,
+ uint32_t session_id_size,
+ Time new_expiry_time) = 0;
- // Called by the CDM when session |web_session_id| is closed. Length
+ // Called by the CDM when session |session_id| is closed. Size
// parameter should not include null termination.
- virtual void OnSessionClosed(
- const char* web_session_id, uint32_t web_session_id_length) = 0;
+ virtual void OnSessionClosed(const char* session_id,
+ uint32_t session_id_size) = 0;
- // Called by the CDM when an error occurs in session |web_session_id|
+ // Called by the CDM when an error occurs in session |session_id|
// unrelated to one of the ContentDecryptionModule calls that accept a
// |promise_id|. |error| must be specified, |error_message| and
// |system_code| are optional. Length parameters should not include null
// termination.
- virtual void OnSessionError(
- const char* web_session_id, uint32_t web_session_id_length,
+ // Note:
+ // - This method is only for supporting prefixed EME API.
+ // - This method will be ignored by unprefixed EME. All errors reported
+ // in this method should probably also be reported by one of other methods.
+ virtual void OnLegacySessionError(
+ const char* session_id, uint32_t session_id_length,
Error error,
uint32_t system_code,
const char* error_message, uint32_t error_message_length) = 0;
@@ -716,11 +1085,12 @@ class Host_6 {
// Sends a platform challenge for the given |service_id|. |challenge| is at
// most 256 bits of data to be signed. Once the challenge has been completed,
// the host will call ContentDecryptionModule::OnPlatformChallengeResponse()
- // with the signed challenge response and platform certificate. Length
+ // with the signed challenge response and platform certificate. Size
// parameters should not include null termination.
- virtual void SendPlatformChallenge(
- const char* service_id, uint32_t service_id_length,
- const char* challenge, uint32_t challenge_length) = 0;
+ virtual void SendPlatformChallenge(const char* service_id,
+ uint32_t service_id_size,
+ const char* challenge,
+ uint32_t challenge_size) = 0;
// Attempts to enable output protection (e.g. HDCP) on the display link. The
// |desired_protection_mask| is a bit mask of OutputProtectionMethods. No
@@ -744,8 +1114,8 @@ class Host_6 {
virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
protected:
- Host_6() {}
- virtual ~Host_6() {}
+ Host_8() {}
+ virtual ~Host_8() {}
};
// Represents a decrypted block that has not been decoded.
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.cc b/chromium/media/cdm/ppapi/cdm_adapter.cc
index 83bf8997dc2..29ca3ea9849 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.cc
+++ b/chromium/media/cdm/ppapi/cdm_adapter.cc
@@ -6,7 +6,6 @@
#include "media/base/limits.h"
#include "media/cdm/ppapi/cdm_file_io_impl.h"
-#include "media/cdm/ppapi/cdm_helpers.h"
#include "media/cdm/ppapi/cdm_logging.h"
#include "media/cdm/ppapi/supported_cdm_versions.h"
#include "ppapi/c/ppb_console.h"
@@ -105,10 +104,16 @@ PP_DecryptResult CdmStatusToPpDecryptResult(cdm::Status status) {
return PP_DECRYPTRESULT_DECRYPT_ERROR;
case cdm::kDecodeError:
return PP_DECRYPTRESULT_DECODE_ERROR;
- default:
- PP_NOTREACHED();
- return PP_DECRYPTRESULT_DECODE_ERROR;
+ case cdm::kSessionError:
+ case cdm::kDeferredInitialization:
+ // kSessionError and kDeferredInitialization are only used by the
+ // Initialize* methods internally and never returned. Deliver*
+ // methods should never use these values.
+ break;
}
+
+ PP_NOTREACHED();
+ return PP_DECRYPTRESULT_DECRYPT_ERROR;
}
PP_DecryptedFrameFormat CdmVideoFormatToPpDecryptedFrameFormat(
@@ -222,12 +227,29 @@ cdm::SessionType PpSessionTypeToCdmSessionType(PP_SessionType session_type) {
switch (session_type) {
case PP_SESSIONTYPE_TEMPORARY:
return cdm::kTemporary;
- case PP_SESSIONTYPE_PERSISTENT:
- return cdm::kPersistent;
- default:
- PP_NOTREACHED();
- return cdm::kTemporary;
+ case PP_SESSIONTYPE_PERSISTENT_LICENSE:
+ return cdm::kPersistentLicense;
+ case PP_SESSIONTYPE_PERSISTENT_RELEASE:
+ return cdm::kPersistentKeyRelease;
+ }
+
+ PP_NOTREACHED();
+ return cdm::kTemporary;
+}
+
+cdm::InitDataType PpInitDataTypeToCdmInitDataType(
+ PP_InitDataType init_data_type) {
+ switch (init_data_type) {
+ case PP_INITDATATYPE_CENC:
+ return cdm::kCenc;
+ case PP_INITDATATYPE_KEYIDS:
+ return cdm::kKeyIds;
+ case PP_INITDATATYPE_WEBM:
+ return cdm::kWebM;
}
+
+ PP_NOTREACHED();
+ return cdm::kKeyIds;
}
PP_CdmExceptionCode CdmExceptionTypeToPpCdmExceptionType(cdm::Error error) {
@@ -246,10 +268,44 @@ PP_CdmExceptionCode CdmExceptionTypeToPpCdmExceptionType(cdm::Error error) {
return PP_CDMEXCEPTIONCODE_CLIENTERROR;
case cdm::kOutputError:
return PP_CDMEXCEPTIONCODE_OUTPUTERROR;
- default:
- PP_NOTREACHED();
- return PP_CDMEXCEPTIONCODE_UNKNOWNERROR;
}
+
+ PP_NOTREACHED();
+ return PP_CDMEXCEPTIONCODE_UNKNOWNERROR;
+}
+
+PP_CdmMessageType CdmMessageTypeToPpMessageType(cdm::MessageType message) {
+ switch (message) {
+ case cdm::kLicenseRequest:
+ return PP_CDMMESSAGETYPE_LICENSE_REQUEST;
+ case cdm::kLicenseRenewal:
+ return PP_CDMMESSAGETYPE_LICENSE_RENEWAL;
+ case cdm::kLicenseRelease:
+ return PP_CDMMESSAGETYPE_LICENSE_RELEASE;
+ }
+
+ PP_NOTREACHED();
+ return PP_CDMMESSAGETYPE_LICENSE_REQUEST;
+}
+
+PP_CdmKeyStatus CdmKeyStatusToPpKeyStatus(cdm::KeyStatus status) {
+ switch (status) {
+ case cdm::kUsable:
+ return PP_CDMKEYSTATUS_USABLE;
+ case cdm::kInternalError:
+ return PP_CDMKEYSTATUS_INVALID;
+ case cdm::kExpired:
+ return PP_CDMKEYSTATUS_EXPIRED;
+ case cdm::kOutputNotAllowed:
+ return PP_CDMKEYSTATUS_OUTPUTNOTALLOWED;
+ case cdm::kOutputDownscaled:
+ return PP_CDMKEYSTATUS_OUTPUTDOWNSCALED;
+ case cdm::kStatusPending:
+ return PP_CDMKEYSTATUS_STATUSPENDING;
+ }
+
+ PP_NOTREACHED();
+ return PP_CDMKEYSTATUS_INVALID;
}
} // namespace
@@ -270,6 +326,8 @@ CdmAdapter::CdmAdapter(PP_Instance instance, pp::Module* module)
#endif
allocator_(this),
cdm_(NULL),
+ allow_distinctive_identifier_(false),
+ allow_persistent_state_(false),
deferred_initialize_audio_decoder_(false),
deferred_audio_decoder_config_id_(0),
deferred_initialize_video_decoder_(false),
@@ -281,36 +339,34 @@ CdmAdapter::CdmAdapter(PP_Instance instance, pp::Module* module)
CdmAdapter::~CdmAdapter() {}
-bool CdmAdapter::CreateCdmInstance(const std::string& key_system) {
- PP_DCHECK(!cdm_);
- cdm_ = make_linked_ptr(CdmWrapper::Create(
- key_system.data(), key_system.size(), GetCdmHost, this));
- bool success = cdm_ != NULL;
+CdmWrapper* CdmAdapter::CreateCdmInstance(const std::string& key_system) {
+ CdmWrapper* cdm = CdmWrapper::Create(key_system.data(), key_system.size(),
+ GetCdmHost, this);
const std::string message = "CDM instance for " + key_system +
- (success ? "" : " could not be") + " created.";
+ (cdm ? "" : " could not be") + " created.";
DLOG_TO_CONSOLE(message);
CDM_DLOG() << message;
- return success;
+ return cdm;
}
-// No errors should be reported in this function because the spec says:
-// "Store this new error object internally with the MediaKeys instance being
-// created. This will be used to fire an error against any session created for
-// this instance." These errors will be reported during session creation
-// (CreateSession()) or session loading (LoadSession()).
-// TODO(xhwang): If necessary, we need to store the error here if we want to
-// support more specific error reporting (other than "Unknown").
-void CdmAdapter::Initialize(const std::string& key_system) {
+void CdmAdapter::Initialize(uint32_t promise_id,
+ const std::string& key_system,
+ bool allow_distinctive_identifier,
+ bool allow_persistent_state) {
PP_DCHECK(!key_system.empty());
- PP_DCHECK(key_system_.empty() || (key_system_ == key_system && cdm_));
+ PP_DCHECK(!cdm_);
#if defined(CHECK_DOCUMENT_URL)
PP_URLComponents_Dev url_components = {};
const pp::URLUtil_Dev* url_util = pp::URLUtil_Dev::Get();
- if (!url_util)
+ if (!url_util) {
+ RejectPromise(promise_id, cdm::kUnknownError, 0,
+ "Unable to determine origin.");
return;
+ }
+
pp::Var href = url_util->GetDocumentURL(pp::InstanceHandle(pp_instance()),
&url_components);
PP_DCHECK(href.is_string());
@@ -325,11 +381,18 @@ void CdmAdapter::Initialize(const std::string& key_system) {
}
#endif // defined(CHECK_DOCUMENT_URL)
- if (!cdm_ && !CreateCdmInstance(key_system))
+ cdm_ = make_linked_ptr(CreateCdmInstance(key_system));
+ if (!cdm_) {
+ RejectPromise(promise_id, cdm::kInvalidAccessError, 0,
+ "Unable to create CDM.");
return;
+ }
- PP_DCHECK(cdm_);
key_system_ = key_system;
+ allow_distinctive_identifier_ = allow_distinctive_identifier;
+ allow_persistent_state_ = allow_persistent_state;
+ cdm_->Initialize(allow_distinctive_identifier, allow_persistent_state);
+ OnResolvePromise(promise_id);
}
void CdmAdapter::SetServerCertificate(uint32_t promise_id,
@@ -346,97 +409,49 @@ void CdmAdapter::SetServerCertificate(uint32_t promise_id,
return;
}
- // Initialize() doesn't report an error, so SetServerCertificate() can be
- // called even if Initialize() failed.
- // TODO(jrummell): Remove this code when prefixed EME gets removed.
- if (!cdm_) {
- RejectPromise(promise_id,
- cdm::kInvalidStateError,
- 0,
- "CDM has not been initialized.");
- return;
- }
-
cdm_->SetServerCertificate(
promise_id, server_certificate_ptr, server_certificate_size);
}
-void CdmAdapter::CreateSession(uint32_t promise_id,
- const std::string& init_data_type,
- pp::VarArrayBuffer init_data,
- PP_SessionType session_type) {
- // Initialize() doesn't report an error, so CreateSession() can be called
- // even if Initialize() failed.
- // TODO(jrummell): Remove this code when prefixed EME gets removed.
- // TODO(jrummell): Verify that Initialize() failing does not resolve the
- // MediaKeys.create() promise.
- if (!cdm_) {
- RejectPromise(promise_id,
- cdm::kInvalidStateError,
- 0,
- "CDM has not been initialized.");
- return;
- }
-
- cdm_->CreateSession(promise_id,
- init_data_type.data(),
- init_data_type.size(),
- static_cast<const uint8_t*>(init_data.Map()),
- init_data.ByteLength(),
- PpSessionTypeToCdmSessionType(session_type));
+void CdmAdapter::CreateSessionAndGenerateRequest(uint32_t promise_id,
+ PP_SessionType session_type,
+ PP_InitDataType init_data_type,
+ pp::VarArrayBuffer init_data) {
+ cdm_->CreateSessionAndGenerateRequest(
+ promise_id, PpSessionTypeToCdmSessionType(session_type),
+ PpInitDataTypeToCdmInitDataType(init_data_type),
+ static_cast<const uint8_t*>(init_data.Map()), init_data.ByteLength());
}
void CdmAdapter::LoadSession(uint32_t promise_id,
- const std::string& web_session_id) {
- // Initialize() doesn't report an error, so LoadSession() can be called
- // even if Initialize() failed.
- // TODO(jrummell): Remove this code when prefixed EME gets removed.
- // TODO(jrummell): Verify that Initialize() failing does not resolve the
- // MediaKeys.create() promise.
- if (!cdm_) {
- RejectPromise(promise_id,
- cdm::kInvalidStateError,
- 0,
- "CDM has not been initialized.");
- return;
- }
-
- cdm_->LoadSession(promise_id, web_session_id.data(), web_session_id.size());
+ PP_SessionType session_type,
+ const std::string& session_id) {
+ cdm_->LoadSession(promise_id, PpSessionTypeToCdmSessionType(session_type),
+ session_id.data(), session_id.size());
}
void CdmAdapter::UpdateSession(uint32_t promise_id,
- const std::string& web_session_id,
+ const std::string& session_id,
pp::VarArrayBuffer response) {
const uint8_t* response_ptr = static_cast<const uint8_t*>(response.Map());
const uint32_t response_size = response.ByteLength();
- PP_DCHECK(!web_session_id.empty());
+ PP_DCHECK(!session_id.empty());
PP_DCHECK(response_ptr);
PP_DCHECK(response_size > 0);
- cdm_->UpdateSession(promise_id,
- web_session_id.data(),
- web_session_id.length(),
- response_ptr,
- response_size);
+ cdm_->UpdateSession(promise_id, session_id.data(), session_id.length(),
+ response_ptr, response_size);
}
void CdmAdapter::CloseSession(uint32_t promise_id,
- const std::string& web_session_id) {
- cdm_->CloseSession(
- promise_id, web_session_id.data(), web_session_id.length());
+ const std::string& session_id) {
+ cdm_->CloseSession(promise_id, session_id.data(), session_id.length());
}
void CdmAdapter::RemoveSession(uint32_t promise_id,
- const std::string& web_session_id) {
- cdm_->RemoveSession(
- promise_id, web_session_id.data(), web_session_id.length());
-}
-
-void CdmAdapter::GetUsableKeyIds(uint32_t promise_id,
- const std::string& web_session_id) {
- cdm_->GetUsableKeyIds(
- promise_id, web_session_id.data(), web_session_id.length());
+ const std::string& session_id) {
+ cdm_->RemoveSession(promise_id, session_id.data(), session_id.length());
}
// Note: In the following decryption/decoding related functions, errors are NOT
@@ -638,12 +653,11 @@ cdm::Time CdmAdapter::GetCurrentWallTime() {
}
void CdmAdapter::OnResolveNewSessionPromise(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
+ const char* session_id,
+ uint32_t session_id_size) {
PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendPromiseResolvedWithSessionInternal,
- promise_id,
- std::string(web_session_id, web_session_id_length)));
+ &CdmAdapter::SendPromiseResolvedWithSessionInternal, promise_id,
+ std::string(session_id, session_id_size)));
}
void CdmAdapter::OnResolvePromise(uint32_t promise_id) {
@@ -651,26 +665,11 @@ void CdmAdapter::OnResolvePromise(uint32_t promise_id) {
&CdmAdapter::SendPromiseResolvedInternal, promise_id));
}
-void CdmAdapter::OnResolveKeyIdsPromise(uint32_t promise_id,
- const cdm::BinaryData* usable_key_ids,
- uint32_t usable_key_ids_length) {
- std::vector<std::vector<uint8> > key_ids;
- for (uint32_t i = 0; i < usable_key_ids_length; ++i) {
- key_ids.push_back(
- std::vector<uint8>(usable_key_ids[i].data,
- usable_key_ids[i].data + usable_key_ids[i].length));
- }
- PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendPromiseResolvedWithUsableKeyIdsInternal,
- promise_id,
- key_ids));
-}
-
void CdmAdapter::OnRejectPromise(uint32_t promise_id,
cdm::Error error,
uint32_t system_code,
const char* error_message,
- uint32_t error_message_length) {
+ uint32_t error_message_size) {
// UMA to investigate http://crbug.com/410630
// TODO(xhwang): Remove after bug is fixed.
if (system_code == 0x27) {
@@ -682,10 +681,8 @@ void CdmAdapter::OnRejectPromise(uint32_t promise_id,
kSizeKBBuckets);
}
- RejectPromise(promise_id,
- error,
- system_code,
- std::string(error_message, error_message_length));
+ RejectPromise(promise_id, error, system_code,
+ std::string(error_message, error_message_size));
}
void CdmAdapter::RejectPromise(uint32_t promise_id,
@@ -698,56 +695,84 @@ void CdmAdapter::RejectPromise(uint32_t promise_id,
SessionError(error, system_code, error_message)));
}
-void CdmAdapter::OnSessionMessage(const char* web_session_id,
- uint32_t web_session_id_length,
+void CdmAdapter::OnSessionMessage(const char* session_id,
+ uint32_t session_id_size,
+ cdm::MessageType message_type,
const char* message,
- uint32_t message_length,
- const char* destination_url,
- uint32_t destination_url_length) {
+ uint32_t message_size,
+ const char* legacy_destination_url,
+ uint32_t legacy_destination_url_size) {
+ // License requests should not specify |legacy_destination_url|.
+ // |legacy_destination_url| is not passed to unprefixed EME applications,
+ // so it can be removed when the prefixed API is removed.
+ PP_DCHECK(legacy_destination_url_size == 0 ||
+ message_type != cdm::MessageType::kLicenseRequest);
+
PostOnMain(callback_factory_.NewCallback(
&CdmAdapter::SendSessionMessageInternal,
- std::string(web_session_id, web_session_id_length),
- std::vector<uint8>(message, message + message_length),
- std::string(destination_url, destination_url_length)));
-}
+ SessionMessage(
+ std::string(session_id, session_id_size), message_type, message,
+ message_size,
+ std::string(legacy_destination_url, legacy_destination_url_size))));
+}
+
+void CdmAdapter::OnSessionKeysChange(const char* session_id,
+ uint32_t session_id_size,
+ bool has_additional_usable_key,
+ const cdm::KeyInformation* keys_info,
+ uint32_t keys_info_count) {
+ std::vector<PP_KeyInformation> key_information;
+ for (uint32_t i = 0; i < keys_info_count; ++i) {
+ const auto& key_info = keys_info[i];
+ PP_KeyInformation next_key = {};
+
+ if (key_info.key_id_size > sizeof(next_key.key_id)) {
+ PP_NOTREACHED();
+ continue;
+ }
+
+ // Copy key_id into |next_key|.
+ memcpy(next_key.key_id, key_info.key_id, key_info.key_id_size);
+
+ // Set remaining fields on |next_key|.
+ next_key.key_id_size = key_info.key_id_size;
+ next_key.key_status = CdmKeyStatusToPpKeyStatus(key_info.status);
+ next_key.system_code = key_info.system_code;
+ key_information.push_back(next_key);
+ }
-void CdmAdapter::OnSessionUsableKeysChange(const char* web_session_id,
- uint32_t web_session_id_length,
- bool has_additional_usable_key) {
PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendSessionUsableKeysChangeInternal,
- std::string(web_session_id, web_session_id_length),
- has_additional_usable_key));
+ &CdmAdapter::SendSessionKeysChangeInternal,
+ std::string(session_id, session_id_size), has_additional_usable_key,
+ key_information));
}
-void CdmAdapter::OnExpirationChange(const char* web_session_id,
- uint32_t web_session_id_length,
+void CdmAdapter::OnExpirationChange(const char* session_id,
+ uint32_t session_id_size,
cdm::Time new_expiry_time) {
PostOnMain(callback_factory_.NewCallback(
&CdmAdapter::SendExpirationChangeInternal,
- std::string(web_session_id, web_session_id_length),
- new_expiry_time));
+ std::string(session_id, session_id_size), new_expiry_time));
}
-void CdmAdapter::OnSessionClosed(const char* web_session_id,
- uint32_t web_session_id_length) {
- PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendSessionClosedInternal,
- std::string(web_session_id, web_session_id_length)));
+void CdmAdapter::OnSessionClosed(const char* session_id,
+ uint32_t session_id_size) {
+ PostOnMain(
+ callback_factory_.NewCallback(&CdmAdapter::SendSessionClosedInternal,
+ std::string(session_id, session_id_size)));
}
-void CdmAdapter::OnSessionError(const char* web_session_id,
- uint32_t web_session_id_length,
- cdm::Error error,
- uint32_t system_code,
- const char* error_message,
- uint32_t error_message_length) {
+void CdmAdapter::OnLegacySessionError(const char* session_id,
+ uint32_t session_id_size,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_size) {
PostOnMain(callback_factory_.NewCallback(
&CdmAdapter::SendSessionErrorInternal,
- std::string(web_session_id, web_session_id_length),
- SessionError(error,
- system_code,
- std::string(error_message, error_message_length))));
+ std::string(session_id, session_id_size),
+ SessionError(error, system_code,
+ std::string(error_message, error_message_size))));
}
// Helpers to pass the event to Pepper.
@@ -761,18 +786,10 @@ void CdmAdapter::SendPromiseResolvedInternal(int32_t result,
void CdmAdapter::SendPromiseResolvedWithSessionInternal(
int32_t result,
uint32_t promise_id,
- const std::string& web_session_id) {
+ const std::string& session_id) {
PP_DCHECK(result == PP_OK);
pp::ContentDecryptor_Private::PromiseResolvedWithSession(promise_id,
- web_session_id);
-}
-
-void CdmAdapter::SendPromiseResolvedWithUsableKeyIdsInternal(
- int32_t result,
- uint32_t promise_id,
- std::vector<std::vector<uint8> > key_ids) {
- PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::PromiseResolvedWithKeyIds(promise_id, key_ids);
+ session_id);
}
void CdmAdapter::SendPromiseRejectedInternal(int32_t result,
@@ -786,59 +803,51 @@ void CdmAdapter::SendPromiseRejectedInternal(int32_t result,
error.error_description);
}
-void CdmAdapter::SendSessionMessageInternal(
- int32_t result,
- const std::string& web_session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
+void CdmAdapter::SendSessionMessageInternal(int32_t result,
+ const SessionMessage& message) {
PP_DCHECK(result == PP_OK);
- pp::VarArrayBuffer message_array_buffer(message.size());
- if (message.size() > 0) {
- memcpy(message_array_buffer.Map(), message.data(), message.size());
+ pp::VarArrayBuffer message_array_buffer(message.message.size());
+ if (message.message.size() > 0) {
+ memcpy(message_array_buffer.Map(), message.message.data(),
+ message.message.size());
}
pp::ContentDecryptor_Private::SessionMessage(
- web_session_id, message_array_buffer, destination_url);
-}
-
-void CdmAdapter::SendSessionReadyInternal(int32_t result,
- const std::string& web_session_id) {
- PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionReady(web_session_id);
+ message.session_id, CdmMessageTypeToPpMessageType(message.message_type),
+ message_array_buffer, message.legacy_destination_url);
}
void CdmAdapter::SendSessionClosedInternal(int32_t result,
- const std::string& web_session_id) {
+ const std::string& session_id) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionClosed(web_session_id);
+ pp::ContentDecryptor_Private::SessionClosed(session_id);
}
void CdmAdapter::SendSessionErrorInternal(int32_t result,
- const std::string& web_session_id,
+ const std::string& session_id,
const SessionError& error) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionError(
- web_session_id,
- CdmExceptionTypeToPpCdmExceptionType(error.error),
- error.system_code,
- error.error_description);
+ pp::ContentDecryptor_Private::LegacySessionError(
+ session_id, CdmExceptionTypeToPpCdmExceptionType(error.error),
+ error.system_code, error.error_description);
}
-void CdmAdapter::SendSessionUsableKeysChangeInternal(
+void CdmAdapter::SendSessionKeysChangeInternal(
int32_t result,
- const std::string& web_session_id,
- bool has_additional_usable_key) {
+ const std::string& session_id,
+ bool has_additional_usable_key,
+ const std::vector<PP_KeyInformation>& key_info) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionKeysChange(web_session_id,
- has_additional_usable_key);
+ pp::ContentDecryptor_Private::SessionKeysChange(
+ session_id, has_additional_usable_key, key_info);
}
void CdmAdapter::SendExpirationChangeInternal(int32_t result,
- const std::string& web_session_id,
+ const std::string& session_id,
cdm::Time new_expiry_time) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionExpirationChange(web_session_id,
+ pp::ContentDecryptor_Private::SessionExpirationChange(session_id,
new_expiry_time);
}
@@ -1033,33 +1042,38 @@ void CdmAdapter::LogToConsole(const pp::Var& value) {
}
#endif // !defined(NDEBUG)
-void CdmAdapter::SendPlatformChallenge(
- const char* service_id, uint32_t service_id_length,
- const char* challenge, uint32_t challenge_length) {
+void CdmAdapter::SendPlatformChallenge(const char* service_id,
+ uint32_t service_id_size,
+ const char* challenge,
+ uint32_t challenge_size) {
#if defined(OS_CHROMEOS)
- pp::VarArrayBuffer challenge_var(challenge_length);
- uint8_t* var_data = static_cast<uint8_t*>(challenge_var.Map());
- memcpy(var_data, challenge, challenge_length);
-
- std::string service_id_str(service_id, service_id_length);
-
- linked_ptr<PepperPlatformChallengeResponse> response(
- new PepperPlatformChallengeResponse());
-
- int32_t result = platform_verification_.ChallengePlatform(
- pp::Var(service_id_str),
- challenge_var,
- &response->signed_data,
- &response->signed_data_signature,
- &response->platform_key_certificate,
- callback_factory_.NewCallback(&CdmAdapter::SendPlatformChallengeDone,
- response));
- challenge_var.Unmap();
- if (result == PP_OK_COMPLETIONPENDING)
- return;
+ // If access to a distinctive identifier is not allowed, block platform
+ // verification to prevent access to such an identifier.
+ if (allow_distinctive_identifier_) {
+ pp::VarArrayBuffer challenge_var(challenge_size);
+ uint8_t* var_data = static_cast<uint8_t*>(challenge_var.Map());
+ memcpy(var_data, challenge, challenge_size);
+
+ std::string service_id_str(service_id, service_id_size);
+
+ linked_ptr<PepperPlatformChallengeResponse> response(
+ new PepperPlatformChallengeResponse());
+
+ int32_t result = platform_verification_.ChallengePlatform(
+ pp::Var(service_id_str),
+ challenge_var,
+ &response->signed_data,
+ &response->signed_data_signature,
+ &response->platform_key_certificate,
+ callback_factory_.NewCallback(&CdmAdapter::SendPlatformChallengeDone,
+ response));
+ challenge_var.Unmap();
+ if (result == PP_OK_COMPLETIONPENDING)
+ return;
- // Fall through on error and issue an empty OnPlatformChallengeResponse().
- PP_DCHECK(result != PP_OK);
+ // Fall through on error and issue an empty OnPlatformChallengeResponse().
+ PP_DCHECK(result != PP_OK);
+ }
#endif
cdm::PlatformChallengeResponse platform_challenge_response = {};
@@ -1098,9 +1112,9 @@ void CdmAdapter::QueryOutputProtectionStatus() {
// Fall through on error and issue an empty OnQueryOutputProtectionStatus().
PP_DCHECK(result != PP_OK);
+ CDM_DLOG() << __FUNCTION__ << " failed, result = " << result;
#endif
-
- cdm_->OnQueryOutputProtectionStatus(0, 0);
+ cdm_->OnQueryOutputProtectionStatus(cdm::kQueryFailed, 0, 0);
}
void CdmAdapter::OnDeferredInitializationDone(cdm::StreamType stream_type,
@@ -1131,9 +1145,14 @@ void CdmAdapter::OnDeferredInitializationDone(cdm::StreamType stream_type,
// The CDM owns the returned object and must call FileIO::Close() to release it.
cdm::FileIO* CdmAdapter::CreateFileIO(cdm::FileIOClient* client) {
+ if (!allow_persistent_state_) {
+ CDM_DLOG()
+ << "Cannot create FileIO because persistent state is not allowed.";
+ return nullptr;
+ }
+
return new CdmFileIOImpl(
- client,
- pp_instance(),
+ client, pp_instance(),
callback_factory_.NewCallback(&CdmAdapter::OnFirstFileRead));
}
@@ -1222,32 +1241,49 @@ void CdmAdapter::QueryOutputProtectionStatusDone(int32_t result) {
PP_DCHECK(query_output_protection_in_progress_);
query_output_protection_in_progress_ = false;
- // Return a protection status of none on error.
- if (result != PP_OK)
+ // Return a query status of failed on error.
+ cdm::QueryResult query_result;
+ if (result != PP_OK) {
+ CDM_DLOG() << __FUNCTION__ << " failed, result = " << result;
output_link_mask_ = output_protection_mask_ = 0;
- else
+ query_result = cdm::kQueryFailed;
+ } else {
+ query_result = cdm::kQuerySucceeded;
ReportOutputProtectionQueryResult();
+ }
- cdm_->OnQueryOutputProtectionStatus(output_link_mask_,
+ cdm_->OnQueryOutputProtectionStatus(query_result, output_link_mask_,
output_protection_mask_);
}
#endif
CdmAdapter::SessionError::SessionError(cdm::Error error,
uint32_t system_code,
- std::string error_description)
+ const std::string& error_description)
: error(error),
system_code(system_code),
error_description(error_description) {
}
+CdmAdapter::SessionMessage::SessionMessage(
+ const std::string& session_id,
+ cdm::MessageType message_type,
+ const char* message,
+ uint32_t message_size,
+ const std::string& legacy_destination_url)
+ : session_id(session_id),
+ message_type(message_type),
+ message(message, message + message_size),
+ legacy_destination_url(legacy_destination_url) {
+}
+
void* GetCdmHost(int host_interface_version, void* user_data) {
if (!host_interface_version || !user_data)
return NULL;
- COMPILE_ASSERT(
- cdm::ContentDecryptionModule::Host::kVersion == cdm::Host_6::kVersion,
- update_code_below);
+ static_assert(
+ cdm::ContentDecryptionModule::Host::kVersion == cdm::Host_8::kVersion,
+ "update the code below");
// Ensure IsSupportedCdmHostVersion matches implementation of this function.
// Always update this DCHECK when updating this function.
@@ -1256,19 +1292,22 @@ void* GetCdmHost(int host_interface_version, void* user_data) {
PP_DCHECK(
// Future version is not supported.
- !IsSupportedCdmHostVersion(cdm::Host_6::kVersion + 1) &&
+ !IsSupportedCdmHostVersion(cdm::Host_8::kVersion + 1) &&
// Current version is supported.
- IsSupportedCdmHostVersion(cdm::Host_6::kVersion) &&
+ IsSupportedCdmHostVersion(cdm::Host_8::kVersion) &&
// Include all previous supported versions (if any) here.
+ IsSupportedCdmHostVersion(cdm::Host_7::kVersion) &&
// One older than the oldest supported version is not supported.
- !IsSupportedCdmHostVersion(cdm::Host_6::kVersion - 1));
+ !IsSupportedCdmHostVersion(cdm::Host_7::kVersion - 1));
PP_DCHECK(IsSupportedCdmHostVersion(host_interface_version));
CdmAdapter* cdm_adapter = static_cast<CdmAdapter*>(user_data);
CDM_DLOG() << "Create CDM Host with version " << host_interface_version;
switch (host_interface_version) {
- case cdm::Host_6::kVersion:
- return static_cast<cdm::Host_6*>(cdm_adapter);
+ case cdm::Host_8::kVersion:
+ return static_cast<cdm::Host_8*>(cdm_adapter);
+ case cdm::Host_7::kVersion:
+ return static_cast<cdm::Host_7*>(cdm_adapter);
default:
PP_NOTREACHED();
return NULL;
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.gni b/chromium/media/cdm/ppapi/cdm_adapter.gni
new file mode 100644
index 00000000000..cee8ce62c60
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_adapter.gni
@@ -0,0 +1,129 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This template defines a CDM adapter target. Just use this as you would a
+# normal target and everything should work correctly. If GYP, you would instead
+# depend on media/media_cdm_adapter.gyp:cdmadapter which would in turn modify
+# your target with direct_dependent_settings.
+template("cdm_adapter") {
+ # TODO(GYP) On Mac/Linux this should be a loadable_module.
+ shared_library(target_name) {
+ # Don't filter sources list again.
+ set_sources_assignment_filter([])
+
+ sources = [
+ "//media/cdm/ppapi/api/content_decryption_module.h",
+ "//media/cdm/ppapi/cdm_adapter.cc",
+ "//media/cdm/ppapi/cdm_adapter.h",
+ "//media/cdm/ppapi/cdm_file_io_impl.cc",
+ "//media/cdm/ppapi/cdm_file_io_impl.h",
+ "//media/cdm/ppapi/cdm_helpers.cc",
+ "//media/cdm/ppapi/cdm_helpers.h",
+ "//media/cdm/ppapi/cdm_logging.cc",
+ "//media/cdm/ppapi/cdm_logging.h",
+ "//media/cdm/ppapi/cdm_wrapper.h",
+ "//media/cdm/ppapi/linked_ptr.h",
+ "//media/cdm/ppapi/supported_cdm_versions.h",
+ ]
+ if (defined(invoker.sources)) {
+ sources += invoker.sources
+ }
+
+ if (is_mac) {
+ ldflags = [
+ # Not to strip important symbols by -Wl,-dead_strip.
+ "-Wl,-exported_symbol,_PPP_GetInterface",
+ "-Wl,-exported_symbol,_PPP_InitializeModule",
+ "-Wl,-exported_symbol,_PPP_ShutdownModule",
+ ]
+ #TODO(GYP) Mac: 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
+ } else if (is_posix && !is_mac) {
+ cflags = [ "-fvisibility=hidden" ]
+ # Note GYP sets rpath but this is set by default on shared libraries in
+ # the GN build.
+ }
+
+ # TODO(jschuh) crbug.com/167187
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ if (defined(invoker.all_dependent_configs)) {
+ all_dependent_configs = invoker.all_dependent_configs
+ }
+ if (defined(invoker.allow_circular_includes_from)) {
+ allow_circular_includes_from = invoker.allow_circular_includes_from
+ }
+ if (defined(invoker.cflags)) {
+ cflags = invoker.cflags
+ }
+ if (defined(invoker.cflags_c)) {
+ cflags_c = invoker.cflags_c
+ }
+ if (defined(invoker.cflags_cc)) {
+ cflags_cc = invoker.cflags_cc
+ }
+ if (defined(invoker.cflags_objc)) {
+ cflags_objc = invoker.cflags_objc
+ }
+ if (defined(invoker.cflags_objcc)) {
+ cflags_objcc = invoker.cflags_objcc
+ }
+ if (defined(invoker.check_includes)) {
+ check_includes = invoker.check_includes
+ }
+ if (defined(invoker.data)) {
+ data = invoker.data
+ }
+ if (defined(invoker.data_deps)) {
+ data_deps = invoker.data_deps
+ }
+ if (defined(invoker.datadeps)) {
+ datadeps = invoker.datadeps
+ }
+ if (defined(invoker.defines)) {
+ defines = invoker.defines
+ }
+ if (defined(invoker.deps)) {
+ deps = invoker.deps
+ }
+ if (defined(invoker.direct_dependent_configs)) {
+ direct_dependent_configs = invoker.direct_dependent_configs
+ }
+ if (defined(invoker.forward_dependent_configs_from)) {
+ forward_dependent_configs_from = invoker.forward_dependent_configs_from
+ }
+ if (defined(invoker.include_dirs)) {
+ include_dirs = invoker.include_dirs
+ }
+ if (defined(invoker.ldflags)) {
+ ldflags = invoker.ldflags
+ }
+ if (defined(invoker.lib_dirs)) {
+ lib_dirs = invoker.lib_dirs
+ }
+ if (defined(invoker.libs)) {
+ libs = invoker.libs
+ }
+ if (defined(invoker.output_extension)) {
+ output_extension = invoker.output_extension
+ }
+ if (defined(invoker.output_name)) {
+ output_name = invoker.output_name
+ }
+ if (defined(invoker.public)) {
+ public = invoker.public
+ }
+ if (defined(invoker.public_configs)) {
+ public_configs = invoker.public_configs
+ }
+ if (defined(invoker.public_deps)) {
+ public_deps = invoker.public_deps
+ }
+ if (defined(invoker.testonly)) {
+ testonly = invoker.testonly
+ }
+ if (defined(invoker.visibility)) {
+ visibility = invoker.visibility
+ }
+ }
+}
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.h b/chromium/media/cdm/ppapi/cdm_adapter.h
index 9ceaea7b293..0eb967605ea 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.h
+++ b/chromium/media/cdm/ppapi/cdm_adapter.h
@@ -37,7 +37,8 @@ void* GetCdmHost(int host_interface_version, void* user_data);
// Content Decryption Module (CDM).
class CdmAdapter : public pp::Instance,
public pp::ContentDecryptor_Private,
- public cdm::Host_6 {
+ public cdm::Host_7,
+ public cdm::Host_8 {
public:
CdmAdapter(PP_Instance instance, pp::Module* module);
virtual ~CdmAdapter();
@@ -50,92 +51,85 @@ class CdmAdapter : public pp::Instance,
// PPP_ContentDecryptor_Private implementation.
// Note: Results of calls to these methods must be reported through the
// PPB_ContentDecryptor_Private interface.
- virtual void Initialize(const std::string& key_system) override;
- virtual void SetServerCertificate(
- uint32_t promise_id,
- pp::VarArrayBuffer server_certificate) override;
- virtual void CreateSession(uint32_t promise_id,
- const std::string& init_data_type,
- pp::VarArrayBuffer init_data,
- PP_SessionType session_type) override;
- virtual void LoadSession(uint32_t promise_id,
- const std::string& web_session_id) override;
- virtual void UpdateSession(uint32_t promise_id,
- const std::string& web_session_id,
- pp::VarArrayBuffer response) override;
- virtual void CloseSession(uint32_t promise_id,
- const std::string& web_session_id);
- virtual void RemoveSession(uint32_t promise_id,
- const std::string& web_session_id) override;
- virtual void GetUsableKeyIds(uint32_t promise_id,
- const std::string& web_session_id) override;
- virtual void Decrypt(
- pp::Buffer_Dev encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info) override;
- virtual void InitializeAudioDecoder(
- const PP_AudioDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) override;
- virtual void InitializeVideoDecoder(
- const PP_VideoDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) override;
- virtual void DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) override;
- virtual void ResetDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) override;
- virtual void DecryptAndDecode(
+ void Initialize(uint32_t promise_id,
+ const std::string& key_system,
+ bool allow_distinctive_identifier,
+ bool allow_persistent_state) override;
+ void SetServerCertificate(uint32_t promise_id,
+ pp::VarArrayBuffer server_certificate) override;
+ void CreateSessionAndGenerateRequest(uint32_t promise_id,
+ PP_SessionType session_type,
+ PP_InitDataType init_data_type,
+ pp::VarArrayBuffer init_data) override;
+ void LoadSession(uint32_t promise_id,
+ PP_SessionType session_type,
+ const std::string& session_id) override;
+ void UpdateSession(uint32_t promise_id,
+ const std::string& session_id,
+ pp::VarArrayBuffer response) override;
+ void CloseSession(uint32_t promise_id, const std::string& session_id);
+ void RemoveSession(uint32_t promise_id,
+ const std::string& session_id) override;
+ void Decrypt(pp::Buffer_Dev encrypted_buffer,
+ const PP_EncryptedBlockInfo& encrypted_block_info) override;
+ void InitializeAudioDecoder(const PP_AudioDecoderConfig& decoder_config,
+ pp::Buffer_Dev extra_data_buffer) override;
+ void InitializeVideoDecoder(const PP_VideoDecoderConfig& decoder_config,
+ pp::Buffer_Dev extra_data_buffer) override;
+ void DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) override;
+ void ResetDecoder(PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) override;
+ void DecryptAndDecode(
PP_DecryptorStreamType decoder_type,
pp::Buffer_Dev encrypted_buffer,
const PP_EncryptedBlockInfo& encrypted_block_info) override;
- // cdm::Host_6 implementation.
- virtual cdm::Buffer* Allocate(uint32_t capacity) override;
- virtual void SetTimer(int64_t delay_ms, void* context) override;
- virtual cdm::Time GetCurrentWallTime() override;
- virtual void OnResolveNewSessionPromise(
- uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void OnResolvePromise(uint32_t promise_id) override;
- virtual void OnResolveKeyIdsPromise(uint32_t promise_id,
- const cdm::BinaryData* usable_key_ids,
- uint32_t usable_key_ids_length) override;
- virtual void OnRejectPromise(uint32_t promise_id,
- cdm::Error error,
- uint32_t system_code,
- const char* error_message,
- uint32_t error_message_length) override;
- virtual void OnSessionMessage(const char* web_session_id,
- uint32_t web_session_id_length,
- const char* message,
- uint32_t message_length,
- const char* destination_url,
- uint32_t destination_url_length) override;
- virtual void OnSessionUsableKeysChange(
- const char* web_session_id,
- uint32_t web_session_id_length,
- bool has_additional_usable_key) override;
- virtual void OnExpirationChange(const char* web_session_id,
- uint32_t web_session_id_length,
- cdm::Time new_expiry_time) override;
- virtual void OnSessionClosed(const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void OnSessionError(const char* web_session_id,
- uint32_t web_session_id_length,
- cdm::Error error,
- uint32_t system_code,
- const char* error_message,
- uint32_t error_message_length) override;
- virtual void SendPlatformChallenge(const char* service_id,
- uint32_t service_id_length,
- const char* challenge,
- uint32_t challenge_length) override;
- virtual void EnableOutputProtection(
- uint32_t desired_protection_mask) override;
- virtual void QueryOutputProtectionStatus() override;
- virtual void OnDeferredInitializationDone(
- cdm::StreamType stream_type,
- cdm::Status decoder_status) override;
- virtual cdm::FileIO* CreateFileIO(cdm::FileIOClient* client) override;
+ // cdm::Host_7 and cdm::Host_8 implementation.
+ cdm::Buffer* Allocate(uint32_t capacity) override;
+ void SetTimer(int64_t delay_ms, void* context) override;
+ cdm::Time GetCurrentWallTime() override;
+ void OnResolveNewSessionPromise(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) override;
+ void OnResolvePromise(uint32_t promise_id) override;
+ void OnRejectPromise(uint32_t promise_id,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_size) override;
+ void OnSessionMessage(const char* session_id,
+ uint32_t session_id_size,
+ cdm::MessageType message_type,
+ const char* message,
+ uint32_t message_size,
+ const char* legacy_destination_url,
+ uint32_t legacy_destination_url_size) override;
+ void OnSessionKeysChange(const char* session_id,
+ uint32_t session_id_size,
+ bool has_additional_usable_key,
+ const cdm::KeyInformation* keys_info,
+ uint32_t keys_info_count) override;
+ void OnExpirationChange(const char* session_id,
+ uint32_t session_id_size,
+ cdm::Time new_expiry_time) override;
+ void OnSessionClosed(const char* session_id,
+ uint32_t session_id_size) override;
+ void OnLegacySessionError(const char* session_id,
+ uint32_t session_id_size,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_size) override;
+ void SendPlatformChallenge(const char* service_id,
+ uint32_t service_id_size,
+ const char* challenge,
+ uint32_t challenge_size) override;
+ void EnableOutputProtection(uint32_t desired_protection_mask) override;
+ void QueryOutputProtectionStatus() override;
+ void OnDeferredInitializationDone(cdm::StreamType stream_type,
+ cdm::Status decoder_status) override;
+ cdm::FileIO* CreateFileIO(cdm::FileIOClient* client) override;
private:
// These are reported to UMA server. Do not change the existing values!
@@ -153,45 +147,49 @@ class CdmAdapter : public pp::Instance,
struct SessionError {
SessionError(cdm::Error error,
uint32_t system_code,
- std::string error_description);
+ const std::string& error_description);
cdm::Error error;
uint32_t system_code;
std::string error_description;
};
- bool CreateCdmInstance(const std::string& key_system);
+ struct SessionMessage {
+ SessionMessage(const std::string& session_id,
+ cdm::MessageType message_type,
+ const char* message,
+ uint32_t message_size,
+ const std::string& legacy_destination_url);
+ std::string session_id;
+ cdm::MessageType message_type;
+ std::vector<uint8_t> message;
+ std::string legacy_destination_url;
+ };
+
+ CdmWrapper* CreateCdmInstance(const std::string& key_system);
// <code>PPB_ContentDecryptor_Private</code> dispatchers. These are passed to
// <code>callback_factory_</code> to ensure that calls into
// <code>PPP_ContentDecryptor_Private</code> are asynchronous.
void SendPromiseResolvedInternal(int32_t result, uint32_t promise_id);
- void SendPromiseResolvedWithSessionInternal(
- int32_t result,
- uint32_t promise_id,
- const std::string& web_session_id);
- void SendPromiseResolvedWithUsableKeyIdsInternal(
- int32_t result,
- uint32_t promise_id,
- std::vector<std::vector<uint8> > key_ids);
+ void SendPromiseResolvedWithSessionInternal(int32_t result,
+ uint32_t promise_id,
+ const std::string& session_id);
void SendPromiseRejectedInternal(int32_t result,
uint32_t promise_id,
const SessionError& error);
void SendSessionMessageInternal(int32_t result,
- const std::string& web_session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url);
- void SendSessionReadyInternal(int32_t result,
- const std::string& web_session_id);
- void SendSessionClosedInternal(int32_t result,
- const std::string& web_session_id);
+ const SessionMessage& message);
+ void SendSessionClosedInternal(int32_t result, const std::string& session_id);
void SendSessionErrorInternal(int32_t result,
- const std::string& web_session_id,
+ const std::string& session_id,
const SessionError& error);
- void SendSessionUsableKeysChangeInternal(int32_t result,
- const std::string& web_session_id,
- bool has_additional_usable_key);
+ void SendSessionKeysChangeInternal(
+ int32_t result,
+ const std::string& session_id,
+ bool has_additional_usable_key,
+ const std::vector<PP_KeyInformation>& key_info);
void SendExpirationChangeInternal(int32_t result,
- const std::string& web_session_id,
+ const std::string& session_id,
cdm::Time new_expiry_time);
void RejectPromise(uint32_t promise_id,
cdm::Error error,
@@ -271,6 +269,8 @@ class CdmAdapter : public pp::Instance,
pp::CompletionCallbackFactory<CdmAdapter> callback_factory_;
linked_ptr<CdmWrapper> cdm_;
std::string key_system_;
+ bool allow_distinctive_identifier_;
+ bool allow_persistent_state_;
// If the CDM returned kDeferredInitialization during InitializeAudioDecoder()
// or InitializeVideoDecoder(), the (Audio|Video)DecoderConfig.request_id is
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_impl.cc b/chromium/media/cdm/ppapi/cdm_file_io_impl.cc
index 3dbff43fd05..d173788580a 100644
--- a/chromium/media/cdm/ppapi/cdm_file_io_impl.cc
+++ b/chromium/media/cdm/ppapi/cdm_file_io_impl.cc
@@ -4,7 +4,6 @@
#include "media/cdm/ppapi/cdm_file_io_impl.h"
-#include <algorithm>
#include <sstream>
#include "media/cdm/ppapi/cdm_logging.h"
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_impl.h b/chromium/media/cdm/ppapi/cdm_file_io_impl.h
index d885f1cad35..8e7a88b1a20 100644
--- a/chromium/media/cdm/ppapi/cdm_file_io_impl.h
+++ b/chromium/media/cdm/ppapi/cdm_file_io_impl.h
@@ -54,10 +54,10 @@ class CdmFileIOImpl : public cdm::FileIO {
const pp::CompletionCallback& first_file_read_cb);
// cdm::FileIO implementation.
- virtual void Open(const char* file_name, uint32_t file_name_size) override;
- virtual void Read() override;
- virtual void Write(const uint8_t* data, uint32_t data_size) override;
- virtual void Close() override;
+ void Open(const char* file_name, uint32_t file_name_size) override;
+ void Read() override;
+ void Write(const uint8_t* data, uint32_t data_size) override;
+ void Close() override;
private:
// TODO(xhwang): Introduce more detailed states for UMA logging if needed.
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_test.h b/chromium/media/cdm/ppapi/cdm_file_io_test.h
index 189c1f6447f..ccacec84294 100644
--- a/chromium/media/cdm/ppapi/cdm_file_io_test.h
+++ b/chromium/media/cdm/ppapi/cdm_file_io_test.h
@@ -62,7 +62,7 @@ class FileIOTest : public cdm::FileIOClient {
FileIOTest(const CreateFileIOCB& create_file_io_cb,
const std::string& test_name);
- ~FileIOTest();
+ ~FileIOTest() override;
// Adds a test step in this test. |this| object doesn't take the ownership of
// |data|, which should be valid throughout the lifetime of |this| object.
@@ -96,11 +96,11 @@ class FileIOTest : public cdm::FileIOClient {
static bool MatchesResult(const TestStep& a, const TestStep& b);
// cdm::FileIOClient implementation.
- virtual void OnOpenComplete(Status status) override;
- virtual void OnReadComplete(Status status,
- const uint8_t* data,
- uint32_t data_size) override;
- virtual void OnWriteComplete(Status status) override;
+ void OnOpenComplete(Status status) override;
+ void OnReadComplete(Status status,
+ const uint8_t* data,
+ uint32_t data_size) override;
+ void OnWriteComplete(Status status) override;
// Runs the next step in this test case.
void RunNextStep();
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.cc b/chromium/media/cdm/ppapi/cdm_helpers.cc
index 62f93a009dd..00e7595a001 100644
--- a/chromium/media/cdm/ppapi/cdm_helpers.cc
+++ b/chromium/media/cdm/ppapi/cdm_helpers.cc
@@ -5,18 +5,8 @@
#include "media/cdm/ppapi/cdm_helpers.h"
#include <algorithm>
-#include <utility>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "build/build_config.h"
-#include "media/cdm/ppapi/api/content_decryption_module.h"
-#include "ppapi/c/pp_errors.h"
-#include "ppapi/c/pp_stdint.h"
+
#include "ppapi/cpp/core.h"
-#include "ppapi/cpp/dev/buffer_dev.h"
-#include "ppapi/cpp/instance.h"
-#include "ppapi/cpp/logging.h"
#include "ppapi/cpp/module.h"
namespace media {
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.h b/chromium/media/cdm/ppapi/cdm_helpers.h
index 18c1d17012d..77cd8e3609b 100644
--- a/chromium/media/cdm/ppapi/cdm_helpers.h
+++ b/chromium/media/cdm/ppapi/cdm_helpers.h
@@ -33,11 +33,11 @@ class PpbBuffer : public cdm::Buffer {
PpbBufferAllocator* allocator);
// cdm::Buffer implementation.
- virtual void Destroy() override;
- virtual uint32_t Capacity() const override;
- virtual uint8_t* Data() override;
- virtual void SetSize(uint32_t size) override;
- virtual uint32_t Size() const override { return size_; }
+ void Destroy() override;
+ uint32_t Capacity() const override;
+ uint8_t* Data() override;
+ void SetSize(uint32_t size) override;
+ uint32_t Size() const override { return size_; }
// Takes the |buffer_| from this class and returns it.
// Note: The caller must ensure |allocator->Release()| is called later so that
@@ -54,7 +54,7 @@ class PpbBuffer : public cdm::Buffer {
PpbBuffer(pp::Buffer_Dev buffer,
uint32_t buffer_id,
PpbBufferAllocator* allocator);
- virtual ~PpbBuffer();
+ ~PpbBuffer() override;
pp::Buffer_Dev buffer_;
uint32_t buffer_id_;
@@ -95,17 +95,20 @@ class PpbBufferAllocator {
class DecryptedBlockImpl : public cdm::DecryptedBlock {
public:
DecryptedBlockImpl() : buffer_(NULL), timestamp_(0) {}
- virtual ~DecryptedBlockImpl() { if (buffer_) buffer_->Destroy(); }
+ ~DecryptedBlockImpl() override {
+ if (buffer_)
+ buffer_->Destroy();
+ }
- virtual void SetDecryptedBuffer(cdm::Buffer* buffer) override {
+ void SetDecryptedBuffer(cdm::Buffer* buffer) override {
buffer_ = static_cast<PpbBuffer*>(buffer);
}
- virtual cdm::Buffer* DecryptedBuffer() override { return buffer_; }
+ cdm::Buffer* DecryptedBuffer() override { return buffer_; }
- virtual void SetTimestamp(int64_t timestamp) override {
+ void SetTimestamp(int64_t timestamp) override {
timestamp_ = timestamp;
}
- virtual int64_t Timestamp() const override { return timestamp_; }
+ int64_t Timestamp() const override { return timestamp_; }
private:
PpbBuffer* buffer_;
@@ -117,44 +120,44 @@ class DecryptedBlockImpl : public cdm::DecryptedBlock {
class VideoFrameImpl : public cdm::VideoFrame {
public:
VideoFrameImpl();
- virtual ~VideoFrameImpl();
+ ~VideoFrameImpl() override;
- virtual void SetFormat(cdm::VideoFormat format) override {
+ void SetFormat(cdm::VideoFormat format) override {
format_ = format;
}
- virtual cdm::VideoFormat Format() const override { return format_; }
+ cdm::VideoFormat Format() const override { return format_; }
- virtual void SetSize(cdm::Size size) override { size_ = size; }
- virtual cdm::Size Size() const override { return size_; }
+ void SetSize(cdm::Size size) override { size_ = size; }
+ cdm::Size Size() const override { return size_; }
- virtual void SetFrameBuffer(cdm::Buffer* frame_buffer) override {
+ void SetFrameBuffer(cdm::Buffer* frame_buffer) override {
frame_buffer_ = static_cast<PpbBuffer*>(frame_buffer);
}
- virtual cdm::Buffer* FrameBuffer() override { return frame_buffer_; }
+ cdm::Buffer* FrameBuffer() override { return frame_buffer_; }
- virtual void SetPlaneOffset(cdm::VideoFrame::VideoPlane plane,
- uint32_t offset) override {
+ void SetPlaneOffset(cdm::VideoFrame::VideoPlane plane,
+ uint32_t offset) override {
PP_DCHECK(plane < kMaxPlanes);
plane_offsets_[plane] = offset;
}
- virtual uint32_t PlaneOffset(VideoPlane plane) override {
+ uint32_t PlaneOffset(VideoPlane plane) override {
PP_DCHECK(plane < kMaxPlanes);
return plane_offsets_[plane];
}
- virtual void SetStride(VideoPlane plane, uint32_t stride) override {
+ void SetStride(VideoPlane plane, uint32_t stride) override {
PP_DCHECK(plane < kMaxPlanes);
strides_[plane] = stride;
}
- virtual uint32_t Stride(VideoPlane plane) override {
+ uint32_t Stride(VideoPlane plane) override {
PP_DCHECK(plane < kMaxPlanes);
return strides_[plane];
}
- virtual void SetTimestamp(int64_t timestamp) override {
+ void SetTimestamp(int64_t timestamp) override {
timestamp_ = timestamp;
}
- virtual int64_t Timestamp() const override { return timestamp_; }
+ int64_t Timestamp() const override { return timestamp_; }
private:
// The video buffer format.
@@ -183,22 +186,22 @@ class VideoFrameImpl : public cdm::VideoFrame {
class AudioFramesImpl : public cdm::AudioFrames {
public:
AudioFramesImpl() : buffer_(NULL), format_(cdm::kUnknownAudioFormat) {}
- virtual ~AudioFramesImpl() {
+ ~AudioFramesImpl() override {
if (buffer_)
buffer_->Destroy();
}
// AudioFrames implementation.
- virtual void SetFrameBuffer(cdm::Buffer* buffer) override {
+ void SetFrameBuffer(cdm::Buffer* buffer) override {
buffer_ = static_cast<PpbBuffer*>(buffer);
}
- virtual cdm::Buffer* FrameBuffer() override {
+ cdm::Buffer* FrameBuffer() override {
return buffer_;
}
- virtual void SetFormat(cdm::AudioFormat format) override {
+ void SetFormat(cdm::AudioFormat format) override {
format_ = format;
}
- virtual cdm::AudioFormat Format() const override {
+ cdm::AudioFormat Format() const override {
return format_;
}
diff --git a/chromium/media/cdm/ppapi/cdm_logging.cc b/chromium/media/cdm/ppapi/cdm_logging.cc
index ff05930d9c7..4d47cec99c0 100644
--- a/chromium/media/cdm/ppapi/cdm_logging.cc
+++ b/chromium/media/cdm/ppapi/cdm_logging.cc
@@ -6,7 +6,6 @@
// protection that if the linker tries to link in strings/symbols appended to
// "DLOG() <<" in release build (which it shouldn't), we'll get "undefined
// reference" errors.
-#if !defined(NDEBUG)
#include "media/cdm/ppapi/cdm_logging.h"
@@ -34,10 +33,12 @@
#endif
#include <iomanip>
-#include <string>
+#include <iostream>
namespace media {
+#if !defined(NDEBUG)
+
namespace {
// Helper functions to wrap platform differences.
@@ -100,7 +101,7 @@ CdmLogMessage::CdmLogMessage(const char* file, int line) {
// Time and tick count.
time_t t = time(NULL);
struct tm local_time = {0};
-#if _MSC_VER >= 1400
+#ifdef _MSC_VER
localtime_s(&local_time, &t);
#else
localtime_r(&t, &local_time);
@@ -132,6 +133,10 @@ CdmLogMessage::~CdmLogMessage() {
std::cout << std::endl;
}
-} // namespace media
-
#endif // !defined(NDEBUG)
+
+std::ostream& CdmLogStream::stream() {
+ return std::cout;
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_logging.h b/chromium/media/cdm/ppapi/cdm_logging.h
index a7059182ff7..63961f2b78a 100644
--- a/chromium/media/cdm/ppapi/cdm_logging.h
+++ b/chromium/media/cdm/ppapi/cdm_logging.h
@@ -7,7 +7,7 @@
#ifndef MEDIA_CDM_PPAPI_CDM_LOGGING_H_
#define MEDIA_CDM_PPAPI_CDM_LOGGING_H_
-#include <iostream>
+#include <ostream>
#include <sstream>
#include <string>
@@ -30,6 +30,17 @@ class LogMessageVoidify {
} // namespace
+// This class is used to avoid having to include <iostream> in this file.
+class CdmLogStream {
+ public:
+ CdmLogStream() {}
+
+ // Retrieves the stream that we write to. This header cannot depend on
+ // <iostream> because that will add static initializers to all files that
+ // include this header. See crbug.com/94794.
+ std::ostream& stream();
+};
+
// This class serves two purposes:
// (1) It adds common headers to the log message, e.g. timestamp, process ID.
// (2) It adds a line break at the end of the log message.
@@ -52,13 +63,18 @@ class CdmLogMessage {
#define CDM_LAZY_STREAM(stream, condition) \
!(condition) ? (void) 0 : LogMessageVoidify() & (stream)
-#define CDM_DLOG() CDM_LAZY_STREAM(std::cout, CDM_DLOG_IS_ON()) \
- << CdmLogMessage(__FILE__, __LINE__).message()
-
#if defined(NDEBUG)
+// Logging is disabled for the release builds, theoretically the compiler should
+// take care of removing the references to CdmLogMessage but it's not always the
+// case when some specific optimizations are turned on (like PGO). Update the
+// macro to make sure that we don't try to do any logging or to refer to
+// CdmLogMessage in release.
#define CDM_DLOG_IS_ON() false
+#define CDM_DLOG() CDM_LAZY_STREAM(CdmLogStream().stream(), CDM_DLOG_IS_ON())
#else
#define CDM_DLOG_IS_ON() true
+#define CDM_DLOG() CDM_LAZY_STREAM(CdmLogStream().stream(), CDM_DLOG_IS_ON()) \
+ << CdmLogMessage(__FILE__, __LINE__).message()
#endif
} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_wrapper.h b/chromium/media/cdm/ppapi/cdm_wrapper.h
index 4cc54ed8648..3e48eb4ba0f 100644
--- a/chromium/media/cdm/ppapi/cdm_wrapper.h
+++ b/chromium/media/cdm/ppapi/cdm_wrapper.h
@@ -42,32 +42,31 @@ class CdmWrapper {
virtual ~CdmWrapper() {};
+ virtual void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) = 0;
virtual void SetServerCertificate(uint32_t promise_id,
const uint8_t* server_certificate_data,
uint32_t server_certificate_data_size) = 0;
- virtual void CreateSession(uint32_t promise_id,
- const char* init_data_type,
- uint32_t init_data_type_size,
- const uint8_t* init_data,
- uint32_t init_data_size,
- cdm::SessionType session_type) = 0;
+ virtual void CreateSessionAndGenerateRequest(uint32_t promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8_t* init_data,
+ uint32_t init_data_size) = 0;
virtual void LoadSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) = 0;
+ cdm::SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_size) = 0;
virtual void UpdateSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size,
+ const char* session_id,
+ uint32_t session_id_size,
const uint8_t* response,
uint32_t response_size) = 0;
virtual void CloseSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) = 0;
+ const char* session_id,
+ uint32_t session_id_size) = 0;
virtual void RemoveSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) = 0;
- virtual void GetUsableKeyIds(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) = 0;
+ const char* session_id,
+ uint32_t session_id_size) = 0;
virtual void TimerExpired(void* context) = 0;
virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
cdm::DecryptedBlock* decrypted_buffer) = 0;
@@ -86,6 +85,7 @@ class CdmWrapper {
virtual void OnPlatformChallengeResponse(
const cdm::PlatformChallengeResponse& response) = 0;
virtual void OnQueryOutputProtectionStatus(
+ cdm::QueryResult result,
uint32_t link_mask,
uint32_t output_protection_mask) = 0;
@@ -116,11 +116,16 @@ class CdmWrapperImpl : public CdmWrapper {
static_cast<CdmInterface*>(cdm_instance));
}
- virtual ~CdmWrapperImpl() {
+ ~CdmWrapperImpl() override {
cdm_->Destroy();
}
- virtual void SetServerCertificate(
+ void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) override {
+ cdm_->Initialize(allow_distinctive_identifier, allow_persistent_state);
+ }
+
+ void SetServerCertificate(
uint32_t promise_id,
const uint8_t* server_certificate_data,
uint32_t server_certificate_data_size) override {
@@ -128,104 +133,94 @@ class CdmWrapperImpl : public CdmWrapper {
promise_id, server_certificate_data, server_certificate_data_size);
}
- virtual void CreateSession(uint32_t promise_id,
- const char* init_data_type,
- uint32_t init_data_type_size,
- const uint8_t* init_data,
- uint32_t init_data_size,
- cdm::SessionType session_type) override {
- cdm_->CreateSession(promise_id,
- init_data_type,
- init_data_type_size,
- init_data,
- init_data_size,
- session_type);
+ void CreateSessionAndGenerateRequest(
+ uint32_t promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8_t* init_data,
+ uint32_t init_data_size) override {
+ cdm_->CreateSessionAndGenerateRequest(
+ promise_id, session_type, init_data_type, init_data, init_data_size);
}
- virtual void LoadSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) override {
- cdm_->LoadSession(promise_id, web_session_id, web_session_id_size);
+ void LoadSession(uint32_t promise_id,
+ cdm::SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_size) override {
+ cdm_->LoadSession(promise_id, session_type, session_id, session_id_size);
}
- virtual void UpdateSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size,
+ void UpdateSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size,
const uint8_t* response,
uint32_t response_size) override {
- cdm_->UpdateSession(promise_id,
- web_session_id,
- web_session_id_size,
- response,
+ cdm_->UpdateSession(promise_id, session_id, session_id_size, response,
response_size);
}
- virtual void CloseSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) override {
- cdm_->CloseSession(promise_id, web_session_id, web_session_id_size);
- }
-
- virtual void RemoveSession(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) override {
- cdm_->RemoveSession(promise_id, web_session_id, web_session_id_size);
+ void CloseSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) override {
+ cdm_->CloseSession(promise_id, session_id, session_id_size);
}
- virtual void GetUsableKeyIds(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_size) override {
- cdm_->GetUsableKeyIds(promise_id, web_session_id, web_session_id_size);
+ void RemoveSession(uint32_t promise_id,
+ const char* session_id,
+ uint32_t session_id_size) override {
+ cdm_->RemoveSession(promise_id, session_id, session_id_size);
}
- virtual void TimerExpired(void* context) override {
+ void TimerExpired(void* context) override {
cdm_->TimerExpired(context);
}
- virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
- cdm::DecryptedBlock* decrypted_buffer) override {
+ cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
+ cdm::DecryptedBlock* decrypted_buffer) override {
return cdm_->Decrypt(encrypted_buffer, decrypted_buffer);
}
- virtual cdm::Status InitializeAudioDecoder(
+ cdm::Status InitializeAudioDecoder(
const cdm::AudioDecoderConfig& audio_decoder_config) override {
return cdm_->InitializeAudioDecoder(audio_decoder_config);
}
- virtual cdm::Status InitializeVideoDecoder(
+ cdm::Status InitializeVideoDecoder(
const cdm::VideoDecoderConfig& video_decoder_config) override {
return cdm_->InitializeVideoDecoder(video_decoder_config);
}
- virtual void DeinitializeDecoder(cdm::StreamType decoder_type) override {
+ void DeinitializeDecoder(cdm::StreamType decoder_type) override {
cdm_->DeinitializeDecoder(decoder_type);
}
- virtual void ResetDecoder(cdm::StreamType decoder_type) override {
+ void ResetDecoder(cdm::StreamType decoder_type) override {
cdm_->ResetDecoder(decoder_type);
}
- virtual cdm::Status DecryptAndDecodeFrame(
+ cdm::Status DecryptAndDecodeFrame(
const cdm::InputBuffer& encrypted_buffer,
cdm::VideoFrame* video_frame) override {
return cdm_->DecryptAndDecodeFrame(encrypted_buffer, video_frame);
}
- virtual cdm::Status DecryptAndDecodeSamples(
+ cdm::Status DecryptAndDecodeSamples(
const cdm::InputBuffer& encrypted_buffer,
cdm::AudioFrames* audio_frames) override {
return cdm_->DecryptAndDecodeSamples(encrypted_buffer, audio_frames);
}
- virtual void OnPlatformChallengeResponse(
+ void OnPlatformChallengeResponse(
const cdm::PlatformChallengeResponse& response) override {
cdm_->OnPlatformChallengeResponse(response);
}
- virtual void OnQueryOutputProtectionStatus(
+ void OnQueryOutputProtectionStatus(
+ cdm::QueryResult result,
uint32_t link_mask,
uint32_t output_protection_mask) override {
- cdm_->OnQueryOutputProtectionStatus(link_mask, output_protection_mask);
+ cdm_->OnQueryOutputProtectionStatus(result, link_mask,
+ output_protection_mask);
}
private:
@@ -238,24 +233,60 @@ class CdmWrapperImpl : public CdmWrapper {
DISALLOW_COPY_AND_ASSIGN(CdmWrapperImpl);
};
+// Overrides for the cdm::Host_7 methods.
+// TODO(jrummell): Remove these once Host_7 interface is removed.
+
+template <>
+void CdmWrapperImpl<cdm::ContentDecryptionModule_7>::Initialize(
+ bool allow_distinctive_identifier,
+ bool allow_persistent_state) {
+}
+
+template <>
+void CdmWrapperImpl<cdm::ContentDecryptionModule_7>::
+ CreateSessionAndGenerateRequest(uint32_t promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8_t* init_data,
+ uint32_t init_data_size) {
+ std::string init_data_type_as_string = "unknown";
+ switch (init_data_type) {
+ case cdm::kCenc:
+ init_data_type_as_string = "cenc";
+ break;
+ case cdm::kKeyIds:
+ init_data_type_as_string = "keyids";
+ break;
+ case cdm::kWebM:
+ init_data_type_as_string = "webm";
+ break;
+ }
+
+ cdm_->CreateSessionAndGenerateRequest(
+ promise_id, session_type, &init_data_type_as_string[0],
+ init_data_type_as_string.length(), init_data, init_data_size);
+}
+
CdmWrapper* CdmWrapper::Create(const char* key_system,
uint32_t key_system_size,
GetCdmHostFunc get_cdm_host_func,
void* user_data) {
- COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_6::kVersion,
- update_code_below);
+ static_assert(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_8::kVersion,
+ "update the code below");
// Ensure IsSupportedCdmInterfaceVersion() matches this implementation.
// Always update this DCHECK when updating this function.
// If this check fails, update this function and DCHECK or update
// IsSupportedCdmInterfaceVersion().
- PP_DCHECK(
- !IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion +
- 1) &&
- IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion) &&
- !IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion -
- 1));
+ PP_DCHECK(!IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_8::kVersion + 1) &&
+ IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_8::kVersion) &&
+ IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_7::kVersion) &&
+ !IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_7::kVersion - 1));
// Try to create the CDM using the latest CDM interface version.
CdmWrapper* cdm_wrapper =
@@ -264,6 +295,10 @@ CdmWrapper* CdmWrapper::Create(const char* key_system,
// If |cdm_wrapper| is NULL, try to create the CDM using older supported
// versions of the CDM interface here.
+ if (!cdm_wrapper) {
+ cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_7>::Create(
+ key_system, key_system_size, get_cdm_host_func, user_data);
+ }
return cdm_wrapper;
}
@@ -272,9 +307,9 @@ CdmWrapper* CdmWrapper::Create(const char* key_system,
// stub implementations for new or modified methods that the older CDM interface
// does not have.
// Also update supported_cdm_versions.h.
-COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_6::kVersion,
- ensure_cdm_wrapper_templates_have_old_version_support);
+static_assert(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_8::kVersion,
+ "ensure cdm wrapper templates have old version support");
} // namespace media
diff --git a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
index 7b6db1b87bd..6a6ff62a548 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
@@ -7,19 +7,21 @@
#include <algorithm>
#include <cstring>
#include <sstream>
-#include <string>
-#include <vector>
#include "base/bind.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/cdm_callback_promise.h"
+#include "media/base/cdm_key_information.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
+#include "media/base/key_systems.h"
#include "media/cdm/json_web_key.h"
#include "media/cdm/ppapi/cdm_file_io_test.h"
#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
+#include "url/gurl.h"
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
#include "base/basictypes.h"
@@ -75,8 +77,7 @@ const char kExternalClearKeyCrashKeySystem[] =
// Constants for the enumalted session that can be loaded by LoadSession().
// These constants need to be in sync with
// chrome/test/data/media/encrypted_media_utils.js
-const char kLoadableWebSessionId[] = "LoadableSession";
-const char kLoadableSessionContentType[] = "video/webm";
+const char kLoadableSessionId[] = "LoadableSession";
const uint8 kLoadableSessionKeyId[] = "0123456789012345";
const uint8 kLoadableSessionKey[] =
{0xeb, 0xdd, 0x62, 0xf1, 0x68, 0x14, 0xd2, 0x7b,
@@ -86,9 +87,10 @@ const int64 kSecondsPerMinute = 60;
const int64 kMsPerSecond = 1000;
const int64 kInitialTimerDelayMs = 200;
const int64 kMaxTimerDelayMs = 1 * kSecondsPerMinute * kMsPerSecond;
-// Heart beat message header. If a key message starts with |kHeartBeatHeader|,
-// it's a heart beat message. Otherwise, it's a key request.
-const char kHeartBeatHeader[] = "HEARTBEAT";
+// Renewal message header. For prefixed EME, if a key message starts with
+// |kRenewalHeader|, it's a renewal message. Otherwise, it's a key request.
+// FIXME(jrummell): Remove this once prefixed EME goes away.
+const char kRenewalHeader[] = "RENEWAL";
// CDM file IO test result header.
const char kFileIOTestResultHeader[] = "FILEIOTESTRESULT";
@@ -107,10 +109,9 @@ static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
std::vector<media::SubsampleEntry> subsamples;
for (uint32_t i = 0; i < input_buffer.num_subsamples; ++i) {
- media::SubsampleEntry subsample;
- subsample.clear_bytes = input_buffer.subsamples[i].clear_bytes;
- subsample.cypher_bytes = input_buffer.subsamples[i].cipher_bytes;
- subsamples.push_back(subsample);
+ subsamples.push_back(
+ media::SubsampleEntry(input_buffer.subsamples[i].clear_bytes,
+ input_buffer.subsamples[i].cipher_bytes));
}
scoped_ptr<media::DecryptConfig> decrypt_config(new media::DecryptConfig(
@@ -150,22 +151,73 @@ static cdm::Error ConvertException(media::MediaKeys::Exception exception_code) {
case media::MediaKeys::OUTPUT_ERROR:
return cdm::kOutputError;
}
- NOTIMPLEMENTED();
+ NOTREACHED();
return cdm::kUnknownError;
}
static media::MediaKeys::SessionType ConvertSessionType(
cdm::SessionType session_type) {
switch (session_type) {
- case cdm::kPersistent:
- return media::MediaKeys::PERSISTENT_SESSION;
case cdm::kTemporary:
return media::MediaKeys::TEMPORARY_SESSION;
+ case cdm::kPersistentLicense:
+ return media::MediaKeys::PERSISTENT_LICENSE_SESSION;
+ case cdm::kPersistentKeyRelease:
+ return media::MediaKeys::PERSISTENT_RELEASE_MESSAGE_SESSION;
}
- NOTIMPLEMENTED();
+ NOTREACHED();
return media::MediaKeys::TEMPORARY_SESSION;
}
+static media::EmeInitDataType ConvertInitDataType(
+ cdm::InitDataType init_data_type) {
+ switch (init_data_type) {
+ case cdm::kCenc:
+ return media::EmeInitDataType::CENC;
+ case cdm::kKeyIds:
+ return media::EmeInitDataType::KEYIDS;
+ case cdm::kWebM:
+ return media::EmeInitDataType::WEBM;
+ }
+ NOTREACHED();
+ return media::EmeInitDataType::UNKNOWN;
+}
+
+cdm::KeyStatus ConvertKeyStatus(media::CdmKeyInformation::KeyStatus status) {
+ switch (status) {
+ case media::CdmKeyInformation::KeyStatus::USABLE:
+ return cdm::kUsable;
+ case media::CdmKeyInformation::KeyStatus::INTERNAL_ERROR:
+ return cdm::kInternalError;
+ case media::CdmKeyInformation::KeyStatus::EXPIRED:
+ return cdm::kExpired;
+ case media::CdmKeyInformation::KeyStatus::OUTPUT_NOT_ALLOWED:
+ return cdm::kOutputNotAllowed;
+ case media::CdmKeyInformation::KeyStatus::OUTPUT_DOWNSCALED:
+ return cdm::kOutputDownscaled;
+ case media::CdmKeyInformation::KeyStatus::KEY_STATUS_PENDING:
+ return cdm::kStatusPending;
+ }
+ NOTREACHED();
+ return cdm::kInternalError;
+}
+
+// Shallow copy all the key information from |keys_info| into |keys_vector|.
+// |keys_vector| is only valid for the lifetime of |keys_info| because it
+// contains pointers into the latter.
+void ConvertCdmKeysInfo(const std::vector<media::CdmKeyInformation*>& keys_info,
+ std::vector<cdm::KeyInformation>* keys_vector) {
+ keys_vector->reserve(keys_info.size());
+ for (const auto& key_info : keys_info) {
+ cdm::KeyInformation key;
+ key.key_id = vector_as_array(&key_info->key_id);
+ key.key_id_size = key_info->key_id.size();
+ key.status = ConvertKeyStatus(key_info->status);
+ key.system_code = key_info->system_code;
+ keys_vector->push_back(key);
+ }
+}
+
template<typename Type>
class ScopedResetter {
public:
@@ -209,7 +261,8 @@ void* CreateCdmInstance(int cdm_interface_version,
if (!host)
return NULL;
- return new media::ClearKeyCdm(host, key_system_string);
+ // TODO(jrummell): Obtain the proper origin for this instance.
+ return new media::ClearKeyCdm(host, key_system_string, GURL::EmptyGURL());
}
const char* GetCdmVersion() {
@@ -218,16 +271,20 @@ const char* GetCdmVersion() {
namespace media {
-ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, const std::string& key_system)
+ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host,
+ const std::string& key_system,
+ const GURL& origin)
: decryptor_(
+ origin,
base::Bind(&ClearKeyCdm::OnSessionMessage, base::Unretained(this)),
base::Bind(&ClearKeyCdm::OnSessionClosed, base::Unretained(this)),
base::Bind(&ClearKeyCdm::OnSessionKeysChange,
base::Unretained(this))),
host_(host),
key_system_(key_system),
+ has_received_keys_change_event_for_emulated_loadsession_(false),
timer_delay_ms_(kInitialTimerDelayMs),
- heartbeat_timer_set_(false) {
+ renewal_timer_set_(false) {
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
channel_count_ = 0;
bits_per_channel_ = 0;
@@ -239,12 +296,18 @@ ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, const std::string& key_system)
ClearKeyCdm::~ClearKeyCdm() {}
-void ClearKeyCdm::CreateSession(uint32 promise_id,
- const char* init_data_type,
- uint32 init_data_type_size,
- const uint8* init_data,
- uint32 init_data_size,
- cdm::SessionType session_type) {
+void ClearKeyCdm::Initialize(bool /* allow_distinctive_identifier */,
+ bool /* allow_persistent_state */) {
+ // Implementation doesn't use distinctive identifier nor save persistent data,
+ // so nothing to do with these values.
+}
+
+void ClearKeyCdm::CreateSessionAndGenerateRequest(
+ uint32 promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8* init_data,
+ uint32 init_data_size) {
DVLOG(1) << __FUNCTION__;
scoped_ptr<media::NewSessionCdmPromise> promise(
@@ -255,26 +318,27 @@ void ClearKeyCdm::CreateSession(uint32 promise_id,
base::Bind(&ClearKeyCdm::OnPromiseFailed,
base::Unretained(this),
promise_id)));
- decryptor_.CreateSession(std::string(init_data_type, init_data_type_size),
- init_data,
- init_data_size,
- ConvertSessionType(session_type),
- promise.Pass());
+ decryptor_.CreateSessionAndGenerateRequest(
+ ConvertSessionType(session_type), ConvertInitDataType(init_data_type),
+ std::vector<uint8_t>(init_data, init_data + init_data_size),
+ promise.Pass());
if (key_system_ == kExternalClearKeyFileIOTestKeySystem)
StartFileIOTest();
}
-// Loads a emulated stored session. Currently only |kLoadableWebSessionId|
+// Loads a emulated stored session. Currently only |kLoadableSessionId|
// (containing a |kLoadableSessionKey| for |kLoadableSessionKeyId|) is
// supported.
void ClearKeyCdm::LoadSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
+ cdm::SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_length) {
DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(session_type, cdm::kPersistentLicense);
- if (std::string(kLoadableWebSessionId) !=
- std::string(web_session_id, web_session_id_length)) {
+ if (std::string(kLoadableSessionId) !=
+ std::string(session_id, session_id_length)) {
// TODO(jrummell): This should be resolved with undefined, not rejected.
std::string message("Incorrect session id specified for LoadSession().");
host_->OnRejectPromise(promise_id,
@@ -285,6 +349,9 @@ void ClearKeyCdm::LoadSession(uint32 promise_id,
return;
}
+ // Only allowed to successfully load this session once.
+ DCHECK(session_id_for_emulated_loadsession_.empty());
+
scoped_ptr<media::NewSessionCdmPromise> promise(
new media::CdmCallbackPromise<std::string>(
base::Bind(&ClearKeyCdm::OnSessionLoaded,
@@ -293,42 +360,47 @@ void ClearKeyCdm::LoadSession(uint32 promise_id,
base::Bind(&ClearKeyCdm::OnPromiseFailed,
base::Unretained(this),
promise_id)));
- decryptor_.CreateSession(std::string(kLoadableSessionContentType),
- NULL,
- 0,
- MediaKeys::TEMPORARY_SESSION,
- promise.Pass());
+ decryptor_.CreateSessionAndGenerateRequest(
+ MediaKeys::TEMPORARY_SESSION, EmeInitDataType::WEBM,
+ std::vector<uint8_t>(), promise.Pass());
}
void ClearKeyCdm::UpdateSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length,
+ const char* session_id,
+ uint32_t session_id_length,
const uint8* response,
uint32 response_size) {
DVLOG(1) << __FUNCTION__;
- std::string web_session_str(web_session_id, web_session_id_length);
+ std::string web_session_str(session_id, session_id_length);
+
+ // If updating the loadable session, use the actual session id generated.
+ if (web_session_str == std::string(kLoadableSessionId))
+ web_session_str = session_id_for_emulated_loadsession_;
scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
- base::Bind(&ClearKeyCdm::OnSessionUpdated,
- base::Unretained(this),
- promise_id,
- web_session_str),
- base::Bind(
- &ClearKeyCdm::OnPromiseFailed, base::Unretained(this), promise_id)));
+ base::Bind(&ClearKeyCdm::OnPromiseResolved, base::Unretained(this),
+ promise_id),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed, base::Unretained(this),
+ promise_id)));
decryptor_.UpdateSession(
- web_session_str, response, response_size, promise.Pass());
+ web_session_str, std::vector<uint8_t>(response, response + response_size),
+ promise.Pass());
- if (!heartbeat_timer_set_) {
- ScheduleNextHeartBeat();
- heartbeat_timer_set_ = true;
+ if (!renewal_timer_set_) {
+ ScheduleNextRenewal();
+ renewal_timer_set_ = true;
}
}
void ClearKeyCdm::CloseSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
+ const char* session_id,
+ uint32_t session_id_length) {
DVLOG(1) << __FUNCTION__;
- std::string web_session_str(web_session_id, web_session_id_length);
+ std::string web_session_str(session_id, session_id_length);
+
+ // If closing the loadable session, use the actual session id generated.
+ if (web_session_str == std::string(kLoadableSessionId))
+ web_session_str = session_id_for_emulated_loadsession_;
scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
base::Bind(
@@ -339,24 +411,14 @@ void ClearKeyCdm::CloseSession(uint32 promise_id,
}
void ClearKeyCdm::RemoveSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
+ const char* session_id,
+ uint32_t session_id_length) {
DVLOG(1) << __FUNCTION__;
- // RemoveSession only allowed for persistent sessions.
- bool is_persistent_session =
- std::string(kLoadableWebSessionId) ==
- std::string(web_session_id, web_session_id_length);
- if (is_persistent_session) {
- std::string web_session_str(web_session_id, web_session_id_length);
-
- scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
- base::Bind(&ClearKeyCdm::OnPromiseResolved,
- base::Unretained(this),
- promise_id),
- base::Bind(&ClearKeyCdm::OnPromiseFailed,
- base::Unretained(this),
- promise_id)));
- decryptor_.RemoveSession(web_session_str, promise.Pass());
+ std::string web_session_str(session_id, session_id_length);
+
+ // RemoveSession only allowed for the loadable session.
+ if (web_session_str == std::string(kLoadableSessionId)) {
+ web_session_str = session_id_for_emulated_loadsession_;
} else {
// TODO(jrummell): This should be a DCHECK once blink does the proper
// checks.
@@ -366,7 +428,15 @@ void ClearKeyCdm::RemoveSession(uint32 promise_id,
0,
message.data(),
message.length());
+ return;
}
+
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
+ base::Bind(&ClearKeyCdm::OnPromiseResolved, base::Unretained(this),
+ promise_id),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed, base::Unretained(this),
+ promise_id)));
+ decryptor_.RemoveSession(web_session_str, promise.Pass());
}
void ClearKeyCdm::SetServerCertificate(uint32 promise_id,
@@ -376,48 +446,30 @@ void ClearKeyCdm::SetServerCertificate(uint32 promise_id,
host_->OnResolvePromise(promise_id);
}
-void ClearKeyCdm::GetUsableKeyIds(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
- std::string web_session_str(web_session_id, web_session_id_length);
- scoped_ptr<media::KeyIdsPromise> promise(
- new media::CdmCallbackPromise<KeyIdsVector>(
- base::Bind(&ClearKeyCdm::OnUsableKeyIdsObtained,
- base::Unretained(this),
- promise_id),
- base::Bind(&ClearKeyCdm::OnPromiseFailed,
- base::Unretained(this),
- promise_id)));
- decryptor_.GetUsableKeyIds(web_session_str, promise.Pass());
-}
-
void ClearKeyCdm::TimerExpired(void* context) {
if (context == &session_id_for_emulated_loadsession_) {
LoadLoadableSession();
return;
}
- DCHECK(heartbeat_timer_set_);
- std::string heartbeat_message;
- if (!next_heartbeat_message_.empty() &&
- context == &next_heartbeat_message_[0]) {
- heartbeat_message = next_heartbeat_message_;
+ DCHECK(renewal_timer_set_);
+ std::string renewal_message;
+ if (!next_renewal_message_.empty() &&
+ context == &next_renewal_message_[0]) {
+ renewal_message = next_renewal_message_;
} else {
- heartbeat_message = "ERROR: Invalid timer context found!";
+ renewal_message = "ERROR: Invalid timer context found!";
}
// This URL is only used for testing the code path for defaultURL.
// There is no service at this URL, so applications should ignore it.
const char url[] = "http://test.externalclearkey.chromium.org";
- host_->OnSessionMessage(last_session_id_.data(),
- last_session_id_.length(),
- heartbeat_message.data(),
- heartbeat_message.length(),
- url,
- arraysize(url) - 1);
+ host_->OnSessionMessage(last_session_id_.data(), last_session_id_.length(),
+ cdm::kLicenseRenewal, renewal_message.data(),
+ renewal_message.length(), url, arraysize(url) - 1);
- ScheduleNextHeartBeat();
+ ScheduleNextRenewal();
}
static void CopyDecryptResults(
@@ -601,14 +653,14 @@ void ClearKeyCdm::Destroy() {
delete this;
}
-void ClearKeyCdm::ScheduleNextHeartBeat() {
- // Prepare the next heartbeat message and set timer.
+void ClearKeyCdm::ScheduleNextRenewal() {
+ // Prepare the next renewal message and set timer.
std::ostringstream msg_stream;
- msg_stream << kHeartBeatHeader << " from ClearKey CDM set at time "
+ msg_stream << kRenewalHeader << " from ClearKey CDM set at time "
<< host_->GetCurrentWallTime() << ".";
- next_heartbeat_message_ = msg_stream.str();
+ next_renewal_message_ = msg_stream.str();
- host_->SetTimer(timer_delay_ms_, &next_heartbeat_message_[0]);
+ host_->SetTimer(timer_delay_ms_, &next_renewal_message_[0]);
// Use a smaller timer delay at start-up to facilitate testing. Increase the
// timer delay up to a limit to avoid message spam.
@@ -653,7 +705,9 @@ void ClearKeyCdm::OnPlatformChallengeResponse(
}
void ClearKeyCdm::OnQueryOutputProtectionStatus(
- uint32_t link_mask, uint32_t output_protection_mask) {
+ cdm::QueryResult result,
+ uint32_t link_mask,
+ uint32_t output_protection_mask) {
NOTIMPLEMENTED();
};
@@ -662,75 +716,87 @@ void ClearKeyCdm::LoadLoadableSession() {
sizeof(kLoadableSessionKey),
kLoadableSessionKeyId,
sizeof(kLoadableSessionKeyId) - 1);
- // TODO(xhwang): This triggers OnSessionUpdated(). For prefixed EME support,
- // this is okay. Check WD EME support.
scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
- base::Bind(&ClearKeyCdm::OnSessionUpdated,
- base::Unretained(this),
- promise_id_for_emulated_loadsession_,
- session_id_for_emulated_loadsession_),
- base::Bind(&ClearKeyCdm::OnPromiseFailed,
- base::Unretained(this),
+ base::Bind(&ClearKeyCdm::OnLoadSessionUpdated, base::Unretained(this)),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed, base::Unretained(this),
promise_id_for_emulated_loadsession_)));
decryptor_.UpdateSession(session_id_for_emulated_loadsession_,
- reinterpret_cast<const uint8*>(jwk_set.data()),
- jwk_set.size(),
+ std::vector<uint8_t>(jwk_set.begin(), jwk_set.end()),
promise.Pass());
}
-void ClearKeyCdm::OnSessionMessage(const std::string& web_session_id,
+void ClearKeyCdm::OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
const std::vector<uint8>& message,
- const GURL& destination_url) {
+ const GURL& legacy_destination_url) {
DVLOG(1) << "OnSessionMessage: " << message.size();
// Ignore the message when we are waiting to update the loadable session.
- if (web_session_id == session_id_for_emulated_loadsession_)
+ if (session_id == session_id_for_emulated_loadsession_)
return;
// OnSessionMessage() only called during CreateSession(), so no promise
// involved (OnSessionCreated() called to resolve the CreateSession()
// promise).
- host_->OnSessionMessage(web_session_id.data(),
- web_session_id.length(),
+ host_->OnSessionMessage(session_id.data(), session_id.length(),
+ cdm::kLicenseRequest,
reinterpret_cast<const char*>(message.data()),
- message.size(),
- destination_url.spec().data(),
- destination_url.spec().size());
-}
-
-void ClearKeyCdm::OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key) {
- // Ignore the message when we are waiting to update the loadable session.
- if (web_session_id == session_id_for_emulated_loadsession_)
- return;
+ message.size(), legacy_destination_url.spec().data(),
+ legacy_destination_url.spec().size());
+}
+
+void ClearKeyCdm::OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ DVLOG(1) << "OnSessionKeysChange: " << keys_info.size();
+
+ std::string new_session_id = session_id;
+ if (new_session_id == session_id_for_emulated_loadsession_) {
+ // Save |keys_info| if the loadable session is still being created. This
+ // event will then be forwarded on in OnLoadSessionUpdated().
+ if (promise_id_for_emulated_loadsession_ != 0) {
+ has_received_keys_change_event_for_emulated_loadsession_ = true;
+ keys_info_for_emulated_loadsession_.swap(keys_info);
+ return;
+ }
+
+ // Loadable session has already been created, so pass this event on,
+ // using the session_id callers expect to see.
+ new_session_id = std::string(kLoadableSessionId);
+ }
- host_->OnSessionUsableKeysChange(web_session_id.data(),
- web_session_id.length(),
- has_additional_usable_key);
+ std::vector<cdm::KeyInformation> keys_vector;
+ ConvertCdmKeysInfo(keys_info.get(), &keys_vector);
+ host_->OnSessionKeysChange(new_session_id.data(), new_session_id.length(),
+ has_additional_usable_key,
+ vector_as_array(&keys_vector), keys_vector.size());
}
-void ClearKeyCdm::OnSessionClosed(const std::string& web_session_id) {
- host_->OnSessionClosed(web_session_id.data(), web_session_id.length());
+void ClearKeyCdm::OnSessionClosed(const std::string& session_id) {
+ std::string new_session_id = session_id;
+ if (new_session_id == session_id_for_emulated_loadsession_)
+ new_session_id = std::string(kLoadableSessionId);
+ host_->OnSessionClosed(new_session_id.data(), new_session_id.length());
}
void ClearKeyCdm::OnSessionCreated(uint32 promise_id,
- const std::string& web_session_id) {
- // Save the latest session ID for heartbeat and file IO test messages.
- last_session_id_ = web_session_id;
+ const std::string& session_id) {
+ // Save the latest session ID for renewal and file IO test messages.
+ last_session_id_ = session_id;
- host_->OnResolveNewSessionPromise(
- promise_id, web_session_id.data(), web_session_id.length());
+ host_->OnResolveNewSessionPromise(promise_id, session_id.data(),
+ session_id.length());
}
void ClearKeyCdm::OnSessionLoaded(uint32 promise_id,
- const std::string& web_session_id) {
- // Save the latest session ID for heartbeat and file IO test messages.
- last_session_id_ = web_session_id;
+ const std::string& session_id) {
+ // Save the latest session ID for renewal and file IO test messages.
+ last_session_id_ = session_id;
- // |decryptor_| created some session as |web_session_id|, but going forward
- // we need to map that to |kLoadableWebSessionId|, as that is what callers
+ // |decryptor_| created some session as |session_id|, but going forward
+ // we need to map that to |kLoadableSessionId|, as that is what callers
// expect.
- session_id_for_emulated_loadsession_ = web_session_id;
+ session_id_for_emulated_loadsession_ = session_id;
// Delay LoadLoadableSession() to test the case where Decrypt*() calls are
// made before the session is fully loaded.
@@ -744,32 +810,33 @@ void ClearKeyCdm::OnSessionLoaded(uint32 promise_id,
host_->SetTimer(kDelayToLoadSessionMs, &session_id_for_emulated_loadsession_);
}
-void ClearKeyCdm::OnSessionUpdated(uint32 promise_id,
- const std::string& web_session_id) {
- // UpdateSession() may be called to finish loading sessions, so handle
+void ClearKeyCdm::OnLoadSessionUpdated() {
+ // This method is only called to finish loading sessions, so handle
// appropriately.
- if (web_session_id == session_id_for_emulated_loadsession_) {
- session_id_for_emulated_loadsession_ = std::string();
- // |promise_id| is the LoadSession() promise, so resolve appropriately.
- host_->OnResolveNewSessionPromise(
- promise_id, kLoadableWebSessionId, strlen(kLoadableWebSessionId));
- // Generate the UsableKeys event now that the session is "loaded".
- host_->OnSessionUsableKeysChange(
- kLoadableWebSessionId, strlen(kLoadableWebSessionId), true);
- return;
- }
-
- host_->OnResolvePromise(promise_id);
-}
-void ClearKeyCdm::OnUsableKeyIdsObtained(uint32 promise_id,
- const KeyIdsVector& key_ids) {
- scoped_ptr<cdm::BinaryData[]> result(new cdm::BinaryData[key_ids.size()]);
- for (uint32 i = 0; i < key_ids.size(); ++i) {
- result[i].data = key_ids[i].data();
- result[i].length = key_ids[i].size();
+ // |promise_id_for_emulated_loadsession_| is the LoadSession() promise,
+ // so resolve appropriately.
+ host_->OnResolveNewSessionPromise(promise_id_for_emulated_loadsession_,
+ kLoadableSessionId,
+ strlen(kLoadableSessionId));
+ promise_id_for_emulated_loadsession_ = 0;
+
+ // Generate the KeysChange event now that the session is "loaded" if one
+ // was seen.
+ // TODO(jrummell): Once the order of events is fixed in the spec, either
+ // require the keyschange event to have happened, or remove this code.
+ // http://crbug.com/448225
+ if (has_received_keys_change_event_for_emulated_loadsession_) {
+ std::vector<cdm::KeyInformation> keys_vector;
+ CdmKeysInfo keys_info;
+ keys_info.swap(keys_info_for_emulated_loadsession_);
+ has_received_keys_change_event_for_emulated_loadsession_ = false;
+ DCHECK(!keys_vector.empty());
+ ConvertCdmKeysInfo(keys_info.get(), &keys_vector);
+ host_->OnSessionKeysChange(
+ kLoadableSessionId, strlen(kLoadableSessionId), !keys_vector.empty(),
+ vector_as_array(&keys_vector), keys_vector.size());
}
- host_->OnResolveKeyIdsPromise(promise_id, result.get(), key_ids.size());
}
void ClearKeyCdm::OnPromiseResolved(uint32 promise_id) {
@@ -856,12 +923,9 @@ void ClearKeyCdm::StartFileIOTest() {
void ClearKeyCdm::OnFileIOTestComplete(bool success) {
DVLOG(1) << __FUNCTION__ << ": " << success;
std::string message = GetFileIOTestResultMessage(success);
- host_->OnSessionMessage(last_session_id_.data(),
- last_session_id_.length(),
- message.data(),
- message.length(),
- NULL,
- 0);
+ host_->OnSessionMessage(last_session_id_.data(), last_session_id_.length(),
+ cdm::kLicenseRequest, message.data(),
+ message.length(), NULL, 0);
file_io_test_runner_.reset();
}
diff --git a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h
index f75dcc55d17..64186798284 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h
@@ -22,6 +22,8 @@
#define CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER
#endif
+class GURL;
+
namespace media {
class FileIOTestRunner;
class CdmVideoDecoder;
@@ -31,85 +33,83 @@ class FFmpegCdmAudioDecoder;
// Clear key implementation of the cdm::ContentDecryptionModule interface.
class ClearKeyCdm : public ClearKeyCdmInterface {
public:
- ClearKeyCdm(Host* host, const std::string& key_system);
- virtual ~ClearKeyCdm();
+ ClearKeyCdm(Host* host, const std::string& key_system, const GURL& origin);
+ ~ClearKeyCdm() override;
// ContentDecryptionModule implementation.
- virtual void CreateSession(uint32 promise_id,
- const char* init_data_type,
- uint32 init_data_type_size,
- const uint8* init_data,
- uint32 init_data_size,
- cdm::SessionType session_type) override;
- virtual void LoadSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void UpdateSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length,
- const uint8* response,
- uint32 response_size) override;
- virtual void CloseSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void RemoveSession(uint32 promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void GetUsableKeyIds(uint32_t promise_id,
- const char* web_session_id,
- uint32_t web_session_id_length) override;
- virtual void SetServerCertificate(
- uint32 promise_id,
- const uint8_t* server_certificate_data,
- uint32_t server_certificate_data_size) override;
- virtual void TimerExpired(void* context) override;
- virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
- cdm::DecryptedBlock* decrypted_block) override;
- virtual cdm::Status InitializeAudioDecoder(
+ void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) override;
+ void CreateSessionAndGenerateRequest(uint32 promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8* init_data,
+ uint32 init_data_size) override;
+ void LoadSession(uint32 promise_id,
+ cdm::SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void UpdateSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length,
+ const uint8* response,
+ uint32 response_size) override;
+ void CloseSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void RemoveSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void SetServerCertificate(uint32 promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) override;
+ void TimerExpired(void* context) override;
+ cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
+ cdm::DecryptedBlock* decrypted_block) override;
+ cdm::Status InitializeAudioDecoder(
const cdm::AudioDecoderConfig& audio_decoder_config) override;
- virtual cdm::Status InitializeVideoDecoder(
+ cdm::Status InitializeVideoDecoder(
const cdm::VideoDecoderConfig& video_decoder_config) override;
- virtual void DeinitializeDecoder(cdm::StreamType decoder_type) override;
- virtual void ResetDecoder(cdm::StreamType decoder_type) override;
- virtual cdm::Status DecryptAndDecodeFrame(
- const cdm::InputBuffer& encrypted_buffer,
- cdm::VideoFrame* video_frame) override;
- virtual cdm::Status DecryptAndDecodeSamples(
- const cdm::InputBuffer& encrypted_buffer,
- cdm::AudioFrames* audio_frames) override;
- virtual void Destroy() override;
- virtual void OnPlatformChallengeResponse(
+ void DeinitializeDecoder(cdm::StreamType decoder_type) override;
+ void ResetDecoder(cdm::StreamType decoder_type) override;
+ cdm::Status DecryptAndDecodeFrame(const cdm::InputBuffer& encrypted_buffer,
+ cdm::VideoFrame* video_frame) override;
+ cdm::Status DecryptAndDecodeSamples(const cdm::InputBuffer& encrypted_buffer,
+ cdm::AudioFrames* audio_frames) override;
+ void Destroy() override;
+ void OnPlatformChallengeResponse(
const cdm::PlatformChallengeResponse& response) override;
- virtual void OnQueryOutputProtectionStatus(
- uint32_t link_mask, uint32_t output_protection_mask) override;
+ void OnQueryOutputProtectionStatus(cdm::QueryResult result,
+ uint32_t link_mask,
+ uint32_t output_protection_mask) override;
private:
// Emulates a session stored for |session_id_for_emulated_loadsession_|. This
// is necessary since aes_decryptor.cc does not support storing sessions.
void LoadLoadableSession();
+ void OnLoadSessionUpdated();
// ContentDecryptionModule callbacks.
- void OnSessionMessage(const std::string& web_session_id,
+ void OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
const std::vector<uint8>& message,
- const GURL& destination_url);
- void OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key);
- void OnSessionClosed(const std::string& web_session_id);
+ const GURL& legacy_destination_url);
+ void OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info);
+ void OnSessionClosed(const std::string& session_id);
// Handle the success/failure of a promise. These methods are responsible for
// calling |host_| to resolve or reject the promise.
- void OnSessionCreated(uint32 promise_id, const std::string& web_session_id);
- void OnSessionLoaded(uint32 promise_id, const std::string& web_session_id);
- void OnSessionUpdated(uint32 promise_id, const std::string& web_session_id);
- void OnUsableKeyIdsObtained(uint32 promise_id, const KeyIdsVector& key_ids);
+ void OnSessionCreated(uint32 promise_id, const std::string& session_id);
+ void OnSessionLoaded(uint32 promise_id, const std::string& session_id);
void OnPromiseResolved(uint32 promise_id);
void OnPromiseFailed(uint32 promise_id,
MediaKeys::Exception exception_code,
uint32 system_code,
const std::string& error_message);
- // Prepares next heartbeat message and sets a timer for it.
- void ScheduleNextHeartBeat();
+ // Prepares next renewal message and sets a timer for it.
+ void ScheduleNextRenewal();
// Decrypts the |encrypted_buffer| and puts the result in |decrypted_buffer|.
// Returns cdm::kSuccess if decryption succeeded. The decrypted result is
@@ -143,7 +143,7 @@ class ClearKeyCdm : public ClearKeyCdmInterface {
void OnFileIOTestComplete(bool success);
// Keep track of the last session created.
- void SetSessionId(const std::string& web_session_id);
+ void SetSessionId(const std::string& session_id);
AesDecryptor decryptor_;
@@ -152,29 +152,43 @@ class ClearKeyCdm : public ClearKeyCdmInterface {
const std::string key_system_;
std::string last_session_id_;
- std::string next_heartbeat_message_;
+ std::string next_renewal_message_;
// In order to simulate LoadSession(), CreateSession() and then
// UpdateSession() will be called to create a session with known keys.
// |session_id_for_emulated_loadsession_| is used to keep track of the
// session_id allocated by aes_decryptor, as the session_id will be returned
- // as |kLoadableWebSessionId|. Future requests for this simulated session
+ // as |kLoadableSessionId|. Future requests for this simulated session
// need to use |session_id_for_emulated_loadsession_| for all calls
// to aes_decryptor.
// |promise_id_for_emulated_loadsession_| is used to keep track of the
// original LoadSession() promise, as it is not resolved until the
// UpdateSession() call succeeds.
+ // |has_received_keys_change_event_for_emulated_loadsession_| is used to keep
+ // track of whether a keyschange event has been received for the loadable
+ // session in case it happens before the emulated session is fully created.
+ // |keys_info_for_emulated_loadsession_| is used to keep track of the list
+ // of keys provided as a result of calling UpdateSession() if it happens,
+ // since they can't be forwarded on until the LoadSession() promise is
+ // resolved.
// TODO(xhwang): Extract testing code from main implementation.
// See http://crbug.com/341751
+ // TODO(jrummell): Once the order of events is fixed,
+ // |has_received_keys_change_event_for_emulated_loadsession_| should be
+ // removed (the event should have either happened or never happened).
+ // |keys_info_for_emulated_loadsession_| may also go away if the event is
+ // not expected. See http://crbug.com/448225
std::string session_id_for_emulated_loadsession_;
uint32_t promise_id_for_emulated_loadsession_;
+ bool has_received_keys_change_event_for_emulated_loadsession_;
+ CdmKeysInfo keys_info_for_emulated_loadsession_;
// Timer delay in milliseconds for the next host_->SetTimer() call.
int64 timer_delay_ms_;
- // Indicates whether a heartbeat timer has been set to prevent multiple timers
+ // Indicates whether a renewal timer has been set to prevent multiple timers
// from running.
- bool heartbeat_timer_set_;
+ bool renewal_timer_set_;
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
int channel_count_;
diff --git a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h
index dfd1fe2a784..0c70c7d0a9b 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h
@@ -10,7 +10,7 @@
namespace media {
// Aliases for the version of the interfaces that this CDM implements.
-typedef cdm::ContentDecryptionModule_6 ClearKeyCdmInterface;
+typedef cdm::ContentDecryptionModule_8 ClearKeyCdmInterface;
typedef ClearKeyCdmInterface::Host ClearKeyCdmHost;
} // namespace media
diff --git a/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h
index 6bc5e32333c..d10372e94e8 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h
@@ -15,17 +15,17 @@ namespace media {
class FakeCdmVideoDecoder : public CdmVideoDecoder {
public:
explicit FakeCdmVideoDecoder(cdm::Host* host);
- virtual ~FakeCdmVideoDecoder();
+ ~FakeCdmVideoDecoder() override;
// CdmVideoDecoder implementation.
- virtual bool Initialize(const cdm::VideoDecoderConfig& config) override;
- virtual void Deinitialize() override;
- virtual void Reset() override;
- virtual cdm::Status DecodeFrame(const uint8_t* compressed_frame,
- int32_t compressed_frame_size,
- int64_t timestamp,
- cdm::VideoFrame* decoded_frame) override;
- virtual bool is_initialized() const override { return is_initialized_; }
+ bool Initialize(const cdm::VideoDecoderConfig& config) override;
+ void Deinitialize() override;
+ void Reset() override;
+ cdm::Status DecodeFrame(const uint8_t* compressed_frame,
+ int32_t compressed_frame_size,
+ int64_t timestamp,
+ cdm::VideoFrame* decoded_frame) override;
+ bool is_initialized() const override { return is_initialized_; }
private:
bool is_initialized_;
diff --git a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
index 942dce11e8e..0bf0a305017 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
@@ -5,7 +5,6 @@
#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "media/base/buffers.h"
#include "media/base/limits.h"
#include "media/ffmpeg/ffmpeg_common.h"
diff --git a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h
index 00e11fddf47..70bd2bf46c7 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h
@@ -20,17 +20,17 @@ namespace media {
class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
public:
explicit FFmpegCdmVideoDecoder(ClearKeyCdmHost* host);
- virtual ~FFmpegCdmVideoDecoder();
+ ~FFmpegCdmVideoDecoder() override;
// CdmVideoDecoder implementation.
- virtual bool Initialize(const cdm::VideoDecoderConfig& config) override;
- virtual void Deinitialize() override;
- virtual void Reset() override;
- virtual cdm::Status DecodeFrame(const uint8_t* compressed_frame,
- int32_t compressed_frame_size,
- int64_t timestamp,
- cdm::VideoFrame* decoded_frame) override;
- virtual bool is_initialized() const override { return is_initialized_; }
+ bool Initialize(const cdm::VideoDecoderConfig& config) override;
+ void Deinitialize() override;
+ void Reset() override;
+ cdm::Status DecodeFrame(const uint8_t* compressed_frame,
+ int32_t compressed_frame_size,
+ int64_t timestamp,
+ cdm::VideoFrame* decoded_frame) override;
+ bool is_initialized() const override { return is_initialized_; }
// Returns true when |format| and |data_size| specify a supported video
// output configuration.
diff --git a/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h
index 0ba05b9cffe..14f8b147630 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h
@@ -18,17 +18,17 @@ namespace media {
class LibvpxCdmVideoDecoder : public CdmVideoDecoder {
public:
explicit LibvpxCdmVideoDecoder(CdmHost* host);
- virtual ~LibvpxCdmVideoDecoder();
+ ~LibvpxCdmVideoDecoder() override;
// CdmVideoDecoder implementation.
- virtual bool Initialize(const cdm::VideoDecoderConfig& config) override;
- virtual void Deinitialize() override;
- virtual void Reset() override;
- virtual cdm::Status DecodeFrame(const uint8_t* compressed_frame,
- int32_t compressed_frame_size,
- int64_t timestamp,
- cdm::VideoFrame* decoded_frame) override;
- virtual bool is_initialized() const override { return is_initialized_; }
+ bool Initialize(const cdm::VideoDecoderConfig& config) override;
+ void Deinitialize() override;
+ void Reset() override;
+ cdm::Status DecodeFrame(const uint8_t* compressed_frame,
+ int32_t compressed_frame_size,
+ int64_t timestamp,
+ cdm::VideoFrame* decoded_frame) override;
+ bool is_initialized() const override { return is_initialized_; }
// Returns true when |format| and |data_size| specify a supported video
// output configuration.
diff --git a/chromium/media/cdm/ppapi/supported_cdm_versions.h b/chromium/media/cdm/ppapi/supported_cdm_versions.h
index ae6e567c0e0..f27d304f88b 100644
--- a/chromium/media/cdm/ppapi/supported_cdm_versions.h
+++ b/chromium/media/cdm/ppapi/supported_cdm_versions.h
@@ -10,7 +10,7 @@
namespace media {
bool IsSupportedCdmModuleVersion(int version) {
- switch(version) {
+ switch (version) {
// Latest.
case CDM_MODULE_VERSION:
return true;
@@ -20,12 +20,13 @@ bool IsSupportedCdmModuleVersion(int version) {
}
bool IsSupportedCdmInterfaceVersion(int version) {
- COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_6::kVersion,
- update_code_below);
+ static_assert(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_8::kVersion,
+ "update the code below");
switch(version) {
// Supported versions in decreasing order.
- case cdm::ContentDecryptionModule_6::kVersion:
+ case cdm::ContentDecryptionModule_8::kVersion:
+ case cdm::ContentDecryptionModule_7::kVersion:
return true;
default:
return false;
@@ -33,12 +34,13 @@ bool IsSupportedCdmInterfaceVersion(int version) {
}
bool IsSupportedCdmHostVersion(int version) {
- COMPILE_ASSERT(cdm::ContentDecryptionModule::Host::kVersion ==
- cdm::ContentDecryptionModule_6::Host::kVersion,
- update_code_below);
+ static_assert(cdm::ContentDecryptionModule::Host::kVersion ==
+ cdm::ContentDecryptionModule_8::Host::kVersion,
+ "update the code below");
switch(version) {
// Supported versions in decreasing order.
- case cdm::Host_6::kVersion:
+ case cdm::Host_8::kVersion:
+ case cdm::Host_7::kVersion:
return true;
default:
return false;
diff --git a/chromium/media/cdm/proxy_decryptor.cc b/chromium/media/cdm/proxy_decryptor.cc
new file mode 100644
index 00000000000..a56821141ae
--- /dev/null
+++ b/chromium/media/cdm/proxy_decryptor.cc
@@ -0,0 +1,402 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/proxy_decryptor.h"
+
+#include <cstring>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "media/base/cdm_callback_promise.h"
+#include "media/base/cdm_config.h"
+#include "media/base/cdm_factory.h"
+#include "media/base/cdm_key_information.h"
+#include "media/base/key_systems.h"
+#include "media/base/media_permission.h"
+#include "media/cdm/json_web_key.h"
+#include "media/cdm/key_system_names.h"
+
+namespace media {
+
+// Special system code to signal a closed persistent session in a SessionError()
+// call. This is needed because there is no SessionClosed() call in the prefixed
+// EME API.
+const int kSessionClosedSystemCode = 29127;
+
+ProxyDecryptor::PendingGenerateKeyRequestData::PendingGenerateKeyRequestData(
+ EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data)
+ : init_data_type(init_data_type), init_data(init_data) {
+}
+
+ProxyDecryptor::PendingGenerateKeyRequestData::
+ ~PendingGenerateKeyRequestData() {
+}
+
+ProxyDecryptor::ProxyDecryptor(MediaPermission* media_permission,
+ bool use_hw_secure_codecs,
+ const KeyAddedCB& key_added_cb,
+ const KeyErrorCB& key_error_cb,
+ const KeyMessageCB& key_message_cb)
+ : is_creating_cdm_(false),
+ media_permission_(media_permission),
+ use_hw_secure_codecs_(use_hw_secure_codecs),
+ key_added_cb_(key_added_cb),
+ key_error_cb_(key_error_cb),
+ key_message_cb_(key_message_cb),
+ is_clear_key_(false),
+ weak_ptr_factory_(this) {
+ DCHECK(media_permission);
+ DCHECK(!key_added_cb_.is_null());
+ DCHECK(!key_error_cb_.is_null());
+ DCHECK(!key_message_cb_.is_null());
+}
+
+ProxyDecryptor::~ProxyDecryptor() {
+ // Destroy the decryptor explicitly before destroying the plugin.
+ media_keys_.reset();
+}
+
+void ProxyDecryptor::CreateCdm(CdmFactory* cdm_factory,
+ const std::string& key_system,
+ const GURL& security_origin,
+ const CdmContextReadyCB& cdm_context_ready_cb) {
+ DVLOG(1) << __FUNCTION__ << ": key_system = " << key_system;
+ DCHECK(!is_creating_cdm_);
+ DCHECK(!media_keys_);
+
+ // TODO(sandersd): Trigger permissions check here and use it to determine
+ // distinctive identifier support, instead of always requiring the
+ // permission. http://crbug.com/455271
+ CdmConfig cdm_config;
+ cdm_config.allow_distinctive_identifier = true;
+ cdm_config.allow_persistent_state = true;
+ cdm_config.use_hw_secure_codecs = use_hw_secure_codecs_;
+
+ is_creating_cdm_ = true;
+
+ base::WeakPtr<ProxyDecryptor> weak_this = weak_ptr_factory_.GetWeakPtr();
+ cdm_factory->Create(
+ key_system, security_origin, cdm_config,
+ base::Bind(&ProxyDecryptor::OnSessionMessage, weak_this),
+ base::Bind(&ProxyDecryptor::OnSessionClosed, weak_this),
+ base::Bind(&ProxyDecryptor::OnLegacySessionError, weak_this),
+ base::Bind(&ProxyDecryptor::OnSessionKeysChange, weak_this),
+ base::Bind(&ProxyDecryptor::OnSessionExpirationUpdate, weak_this),
+ base::Bind(&ProxyDecryptor::OnCdmCreated, weak_this, key_system,
+ security_origin, cdm_context_ready_cb));
+}
+
+void ProxyDecryptor::OnCdmCreated(const std::string& key_system,
+ const GURL& security_origin,
+ const CdmContextReadyCB& cdm_context_ready_cb,
+ scoped_ptr<MediaKeys> cdm,
+ const std::string& /* error_message */) {
+ is_creating_cdm_ = false;
+
+ if (!cdm) {
+ cdm_context_ready_cb.Run(nullptr);
+ } else {
+ key_system_ = key_system;
+ security_origin_ = security_origin;
+ is_clear_key_ = IsClearKey(key_system) || IsExternalClearKey(key_system);
+ media_keys_ = cdm.Pass();
+
+ cdm_context_ready_cb.Run(media_keys_->GetCdmContext());
+ }
+
+ for (const auto& request : pending_requests_)
+ GenerateKeyRequestInternal(request->init_data_type, request->init_data);
+
+ pending_requests_.clear();
+}
+
+void ProxyDecryptor::GenerateKeyRequest(EmeInitDataType init_data_type,
+ const uint8* init_data,
+ int init_data_length) {
+ std::vector<uint8> init_data_vector(init_data, init_data + init_data_length);
+
+ if (is_creating_cdm_) {
+ pending_requests_.push_back(
+ new PendingGenerateKeyRequestData(init_data_type, init_data_vector));
+ return;
+ }
+
+ GenerateKeyRequestInternal(init_data_type, init_data_vector);
+}
+
+// Returns true if |data| is prefixed with |header| and has data after the
+// |header|.
+bool HasHeader(const std::vector<uint8>& data, const std::string& header) {
+ return data.size() > header.size() &&
+ std::equal(header.begin(), header.end(), data.begin());
+}
+
+// Removes the first |length| items from |data|.
+void StripHeader(std::vector<uint8>& data, size_t length) {
+ data.erase(data.begin(), data.begin() + length);
+}
+
+void ProxyDecryptor::GenerateKeyRequestInternal(
+ EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(!is_creating_cdm_);
+
+ if (!media_keys_) {
+ OnLegacySessionError(std::string(), MediaKeys::NOT_SUPPORTED_ERROR, 0,
+ "CDM creation failed.");
+ return;
+ }
+
+ const char kPrefixedApiPersistentSessionHeader[] = "PERSISTENT|";
+ const char kPrefixedApiLoadSessionHeader[] = "LOAD_SESSION|";
+
+ SessionCreationType session_creation_type = TemporarySession;
+ std::vector<uint8> stripped_init_data = init_data;
+ if (HasHeader(init_data, kPrefixedApiLoadSessionHeader)) {
+ session_creation_type = LoadSession;
+ StripHeader(stripped_init_data, strlen(kPrefixedApiLoadSessionHeader));
+ } else if (HasHeader(init_data, kPrefixedApiPersistentSessionHeader)) {
+ session_creation_type = PersistentSession;
+ StripHeader(stripped_init_data,
+ strlen(kPrefixedApiPersistentSessionHeader));
+ }
+
+ scoped_ptr<NewSessionCdmPromise> promise(new CdmCallbackPromise<std::string>(
+ base::Bind(&ProxyDecryptor::SetSessionId, weak_ptr_factory_.GetWeakPtr(),
+ session_creation_type),
+ base::Bind(&ProxyDecryptor::OnLegacySessionError,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::string()))); // No session id until created.
+
+ if (session_creation_type == LoadSession) {
+ media_keys_->LoadSession(
+ MediaKeys::PERSISTENT_LICENSE_SESSION,
+ std::string(
+ reinterpret_cast<const char*>(vector_as_array(&stripped_init_data)),
+ stripped_init_data.size()),
+ promise.Pass());
+ return;
+ }
+
+ MediaKeys::SessionType session_type =
+ session_creation_type == PersistentSession
+ ? MediaKeys::PERSISTENT_LICENSE_SESSION
+ : MediaKeys::TEMPORARY_SESSION;
+
+ // No permission required when AesDecryptor is used or when the key system is
+ // external clear key.
+ DCHECK(!key_system_.empty());
+ if (CanUseAesDecryptor(key_system_) || IsExternalClearKey(key_system_)) {
+ OnPermissionStatus(session_type, init_data_type, stripped_init_data,
+ promise.Pass(), true /* granted */);
+ return;
+ }
+
+#if defined(OS_CHROMEOS) || defined(OS_ANDROID)
+ media_permission_->RequestPermission(
+ MediaPermission::PROTECTED_MEDIA_IDENTIFIER, security_origin_,
+ base::Bind(&ProxyDecryptor::OnPermissionStatus,
+ weak_ptr_factory_.GetWeakPtr(), session_type, init_data_type,
+ stripped_init_data, base::Passed(&promise)));
+#else
+ OnPermissionStatus(session_type, init_data_type, stripped_init_data,
+ promise.Pass(), true /* granted */);
+#endif
+}
+
+void ProxyDecryptor::OnPermissionStatus(
+ MediaKeys::SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise,
+ bool granted) {
+ // ProxyDecryptor is only used by Prefixed EME, where RequestPermission() is
+ // only for triggering the permission UI. Later CheckPermission() will be
+ // called (e.g. in PlatformVerificationFlow on ChromeOS; in BrowserCdmManager
+ // on Android) and the permission status will be evaluated then.
+ DVLOG_IF(1, !granted) << "Permission request rejected.";
+
+ media_keys_->CreateSessionAndGenerateRequest(session_type, init_data_type,
+ init_data, promise.Pass());
+}
+
+void ProxyDecryptor::AddKey(const uint8* key,
+ int key_length,
+ const uint8* init_data,
+ int init_data_length,
+ const std::string& session_id) {
+ DVLOG(1) << "AddKey()";
+
+ if (!media_keys_) {
+ OnLegacySessionError(std::string(), MediaKeys::INVALID_STATE_ERROR, 0,
+ "CDM is not available.");
+ return;
+ }
+
+ // In the prefixed API, the session parameter provided to addKey() is
+ // optional, so use the single existing session if it exists.
+ std::string new_session_id(session_id);
+ if (new_session_id.empty()) {
+ if (active_sessions_.size() == 1) {
+ base::hash_map<std::string, bool>::iterator it = active_sessions_.begin();
+ new_session_id = it->first;
+ } else {
+ OnLegacySessionError(std::string(), MediaKeys::NOT_SUPPORTED_ERROR, 0,
+ "SessionId not specified.");
+ return;
+ }
+ }
+
+ scoped_ptr<SimpleCdmPromise> promise(new CdmCallbackPromise<>(
+ base::Bind(&ProxyDecryptor::GenerateKeyAdded,
+ weak_ptr_factory_.GetWeakPtr(), session_id),
+ base::Bind(&ProxyDecryptor::OnLegacySessionError,
+ weak_ptr_factory_.GetWeakPtr(), session_id)));
+
+ // EME WD spec only supports a single array passed to the CDM. For
+ // Clear Key using v0.1b, both arrays are used (|init_data| is key_id).
+ // Since the EME WD spec supports the key as a JSON Web Key,
+ // convert the 2 arrays to a JWK and pass it as the single array.
+ if (is_clear_key_) {
+ // Decryptor doesn't support empty key ID (see http://crbug.com/123265).
+ // So ensure a non-empty value is passed.
+ if (!init_data) {
+ static const uint8 kDummyInitData[1] = {0};
+ init_data = kDummyInitData;
+ init_data_length = arraysize(kDummyInitData);
+ }
+
+ std::string jwk =
+ GenerateJWKSet(key, key_length, init_data, init_data_length);
+ DCHECK(!jwk.empty());
+ media_keys_->UpdateSession(new_session_id,
+ std::vector<uint8_t>(jwk.begin(), jwk.end()),
+ promise.Pass());
+ return;
+ }
+
+ media_keys_->UpdateSession(new_session_id,
+ std::vector<uint8_t>(key, key + key_length),
+ promise.Pass());
+}
+
+void ProxyDecryptor::CancelKeyRequest(const std::string& session_id) {
+ DVLOG(1) << "CancelKeyRequest()";
+
+ if (!media_keys_) {
+ OnLegacySessionError(std::string(), MediaKeys::INVALID_STATE_ERROR, 0,
+ "CDM is not available.");
+ return;
+ }
+
+ scoped_ptr<SimpleCdmPromise> promise(new CdmCallbackPromise<>(
+ base::Bind(&ProxyDecryptor::OnSessionClosed,
+ weak_ptr_factory_.GetWeakPtr(), session_id),
+ base::Bind(&ProxyDecryptor::OnLegacySessionError,
+ weak_ptr_factory_.GetWeakPtr(), session_id)));
+ media_keys_->RemoveSession(session_id, promise.Pass());
+}
+
+void ProxyDecryptor::OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8>& message,
+ const GURL& legacy_destination_url) {
+ // Assumes that OnSessionCreated() has been called before this.
+
+ // For ClearKey, convert the message from JSON into just passing the key
+ // as the message. If unable to extract the key, return the message unchanged.
+ if (is_clear_key_) {
+ std::vector<uint8> key;
+ if (ExtractFirstKeyIdFromLicenseRequest(message, &key)) {
+ key_message_cb_.Run(session_id, key, legacy_destination_url);
+ return;
+ }
+ }
+
+ key_message_cb_.Run(session_id, message, legacy_destination_url);
+}
+
+void ProxyDecryptor::OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ // EME v0.1b doesn't support this event.
+}
+
+void ProxyDecryptor::OnSessionExpirationUpdate(
+ const std::string& session_id,
+ const base::Time& new_expiry_time) {
+ // EME v0.1b doesn't support this event.
+}
+
+void ProxyDecryptor::GenerateKeyAdded(const std::string& session_id) {
+ // EME WD doesn't support this event, but it is needed for EME v0.1b.
+ key_added_cb_.Run(session_id);
+}
+
+void ProxyDecryptor::OnSessionClosed(const std::string& session_id) {
+ base::hash_map<std::string, bool>::iterator it =
+ active_sessions_.find(session_id);
+
+ // Latest EME spec separates closing a session ("allows an application to
+ // indicate that it no longer needs the session") and actually closing the
+ // session (done by the CDM at any point "such as in response to a close()
+ // call, when the session is no longer needed, or when system resources are
+ // lost.") Thus the CDM may cause 2 close() events -- one to resolve the
+ // close() promise, and a second to actually close the session. Prefixed EME
+ // only expects 1 close event, so drop the second (and subsequent) events.
+ // However, this means we can't tell if the CDM is generating spurious close()
+ // events.
+ if (it == active_sessions_.end())
+ return;
+
+ if (it->second) {
+ OnLegacySessionError(session_id, MediaKeys::NOT_SUPPORTED_ERROR,
+ kSessionClosedSystemCode,
+ "Do not close persistent sessions.");
+ }
+ active_sessions_.erase(it);
+}
+
+void ProxyDecryptor::OnLegacySessionError(const std::string& session_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ // Convert |error_name| back to MediaKeys::KeyError if possible. Prefixed
+ // EME has different error message, so all the specific error events will
+ // get lost.
+ MediaKeys::KeyError error_code;
+ switch (exception_code) {
+ case MediaKeys::CLIENT_ERROR:
+ error_code = MediaKeys::kClientError;
+ break;
+ case MediaKeys::OUTPUT_ERROR:
+ error_code = MediaKeys::kOutputError;
+ break;
+ default:
+ // This will include all other CDM4 errors and any error generated
+ // by CDM5 or later.
+ error_code = MediaKeys::kUnknownError;
+ break;
+ }
+ key_error_cb_.Run(session_id, error_code, system_code);
+}
+
+void ProxyDecryptor::SetSessionId(SessionCreationType session_type,
+ const std::string& session_id) {
+ // Loaded sessions are considered persistent.
+ bool is_persistent =
+ session_type == PersistentSession || session_type == LoadSession;
+ active_sessions_.insert(std::make_pair(session_id, is_persistent));
+
+ // For LoadSession(), generate the KeyAdded event.
+ if (session_type == LoadSession)
+ GenerateKeyAdded(session_id);
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/proxy_decryptor.h b/chromium/media/cdm/proxy_decryptor.h
new file mode 100644
index 00000000000..810373957f5
--- /dev/null
+++ b/chromium/media/cdm/proxy_decryptor.h
@@ -0,0 +1,160 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PROXY_DECRYPTOR_H_
+#define MEDIA_CDM_PROXY_DECRYPTOR_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/cdm_context.h"
+#include "media/base/decryptor.h"
+#include "media/base/eme_constants.h"
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+#include "url/gurl.h"
+
+namespace media {
+
+class CdmFactory;
+class MediaPermission;
+
+// ProxyDecryptor is for EME v0.1b only. It should not be used for the WD API.
+// A decryptor proxy that creates a real decryptor object on demand and
+// forwards decryptor calls to it.
+//
+// TODO(xhwang): Currently we don't support run-time switching among decryptor
+// objects. Fix this when needed.
+// TODO(xhwang): The ProxyDecryptor is not a Decryptor. Find a better name!
+class MEDIA_EXPORT ProxyDecryptor {
+ public:
+ // Callback to provide a CdmContext when the CDM creation is finished.
+ // If CDM creation failed, |cdm_context| will be null.
+ typedef base::Callback<void(CdmContext* cdm_context)> CdmContextReadyCB;
+
+ // These are similar to the callbacks in media_keys.h, but pass back the
+ // session ID rather than the internal session ID.
+ typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
+ typedef base::Callback<void(const std::string& session_id,
+ MediaKeys::KeyError error_code,
+ uint32 system_code)> KeyErrorCB;
+ typedef base::Callback<void(const std::string& session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url)> KeyMessageCB;
+
+ ProxyDecryptor(MediaPermission* media_permission,
+ bool use_hw_secure_codecs,
+ const KeyAddedCB& key_added_cb,
+ const KeyErrorCB& key_error_cb,
+ const KeyMessageCB& key_message_cb);
+ virtual ~ProxyDecryptor();
+
+ // Creates the CDM and fires |cdm_created_cb|. This method should only be
+ // called once. If CDM creation failed, all following GenerateKeyRequest,
+ // AddKey and CancelKeyRequest calls will result in a KeyError.
+ void CreateCdm(CdmFactory* cdm_factory,
+ const std::string& key_system,
+ const GURL& security_origin,
+ const CdmContextReadyCB& cdm_context_ready_cb);
+
+ // May only be called after CreateCDM().
+ void GenerateKeyRequest(EmeInitDataType init_data_type,
+ const uint8* init_data,
+ int init_data_length);
+ void AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id);
+ void CancelKeyRequest(const std::string& session_id);
+
+ private:
+ // Callback for CreateCdm().
+ void OnCdmCreated(const std::string& key_system,
+ const GURL& security_origin,
+ const CdmContextReadyCB& cdm_context_ready_cb,
+ scoped_ptr<MediaKeys> cdm,
+ const std::string& error_message);
+
+ void GenerateKeyRequestInternal(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data);
+
+ // Callbacks for firing session events.
+ void OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8>& message,
+ const GURL& legacy_destination_url);
+ void OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info);
+ void OnSessionExpirationUpdate(const std::string& session_id,
+ const base::Time& new_expiry_time);
+ void GenerateKeyAdded(const std::string& session_id);
+ void OnSessionClosed(const std::string& session_id);
+ void OnLegacySessionError(const std::string& session_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
+
+ // Callback for permission request.
+ void OnPermissionStatus(MediaKeys::SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise,
+ bool granted);
+
+ enum SessionCreationType {
+ TemporarySession,
+ PersistentSession,
+ LoadSession
+ };
+
+ // Called when a session is actually created or loaded.
+ void SetSessionId(SessionCreationType session_type,
+ const std::string& session_id);
+
+ struct PendingGenerateKeyRequestData {
+ PendingGenerateKeyRequestData(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data);
+ ~PendingGenerateKeyRequestData();
+
+ const EmeInitDataType init_data_type;
+ const std::vector<uint8> init_data;
+ };
+
+ bool is_creating_cdm_;
+
+ // The real MediaKeys that manages key operations for the ProxyDecryptor.
+ scoped_ptr<MediaKeys> media_keys_;
+
+ MediaPermission* media_permission_;
+ bool use_hw_secure_codecs_;
+
+ // Callbacks for firing key events.
+ KeyAddedCB key_added_cb_;
+ KeyErrorCB key_error_cb_;
+ KeyMessageCB key_message_cb_;
+
+ std::string key_system_;
+ GURL security_origin_;
+
+ // Keep track of both persistent and non-persistent sessions.
+ base::hash_map<std::string, bool> active_sessions_;
+
+ bool is_clear_key_;
+
+ ScopedVector<PendingGenerateKeyRequestData> pending_requests_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<ProxyDecryptor> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProxyDecryptor);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PROXY_DECRYPTOR_H_
diff --git a/chromium/media/cdm/stub/stub_cdm.cc b/chromium/media/cdm/stub/stub_cdm.cc
new file mode 100644
index 00000000000..0bfa25ebaa0
--- /dev/null
+++ b/chromium/media/cdm/stub/stub_cdm.cc
@@ -0,0 +1,162 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/stub/stub_cdm.h"
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+
+// Version number for this stub. The third number represents the
+// cdm::ContentDecryptionModule version.
+const char kStubCdmVersion[] = "1.4.8.0";
+
+void INITIALIZE_CDM_MODULE() {
+}
+
+void DeinitializeCdmModule() {
+}
+
+void* CreateCdmInstance(int cdm_interface_version,
+ const char* /* key_system */,
+ uint32_t /* key_system_size */,
+ GetCdmHostFunc get_cdm_host_func,
+ void* user_data) {
+ DVLOG(1) << "CreateCdmInstance()";
+
+ if (cdm_interface_version != media::StubCdmInterface::kVersion)
+ return nullptr;
+
+ media::StubCdmInterface::Host* host =
+ static_cast<media::StubCdmInterface::Host*>(get_cdm_host_func(
+ media::StubCdmInterface::Host::kVersion, user_data));
+ if (!host)
+ return nullptr;
+
+ return new media::StubCdm(host);
+}
+
+const char* GetCdmVersion() {
+ return kStubCdmVersion;
+}
+
+namespace media {
+
+StubCdm::StubCdm(Host* host) : host_(host), next_session_id_(0) {
+}
+
+StubCdm::~StubCdm() {
+}
+
+void StubCdm::Initialize(bool /* allow_distinctive_identifier */,
+ bool /* allow_persistent_state */) {
+}
+
+void StubCdm::CreateSessionAndGenerateRequest(
+ uint32 promise_id,
+ cdm::SessionType /* session_type */,
+ cdm::InitDataType /* init_data_type */,
+ const uint8* /* init_data */,
+ uint32 /* init_data_size */) {
+ // Provide a dummy message (with a trivial session ID) to enable some testing
+ // and be consistent with existing testing without a license server.
+ std::string session_id(base::UintToString(next_session_id_++));
+ host_->OnResolveNewSessionPromise(promise_id, session_id.data(),
+ session_id.length());
+ host_->OnSessionMessage(session_id.data(), session_id.length(),
+ cdm::kLicenseRequest, nullptr, 0, nullptr, 0);
+}
+
+void StubCdm::LoadSession(uint32 promise_id,
+ cdm::SessionType /* session_type */,
+ const char* /* session_id */,
+ uint32_t /* session_id_length */) {
+ FailRequest(promise_id);
+}
+
+void StubCdm::UpdateSession(uint32 promise_id,
+ const char* /* session_id */,
+ uint32_t /* session_id_length */,
+ const uint8* /* response */,
+ uint32 /* response_size */) {
+ FailRequest(promise_id);
+}
+
+void StubCdm::CloseSession(uint32 promise_id,
+ const char* /* session_id */,
+ uint32_t /* session_id_length */) {
+ FailRequest(promise_id);
+}
+
+void StubCdm::RemoveSession(uint32 promise_id,
+ const char* /* session_id */,
+ uint32_t /* session_id_length */) {
+ FailRequest(promise_id);
+}
+
+void StubCdm::SetServerCertificate(
+ uint32 promise_id,
+ const uint8_t* /* server_certificate_data */,
+ uint32_t /* server_certificate_data_size */) {
+ FailRequest(promise_id);
+}
+
+void StubCdm::TimerExpired(void* /* context */) {
+}
+
+cdm::Status StubCdm::Decrypt(const cdm::InputBuffer& /* encrypted_buffer */,
+ cdm::DecryptedBlock* /* decrypted_block */) {
+ return cdm::kDecryptError;
+}
+
+cdm::Status StubCdm::InitializeAudioDecoder(
+ const cdm::AudioDecoderConfig& /* audio_decoder_config */) {
+ return cdm::kDecryptError;
+}
+
+cdm::Status StubCdm::InitializeVideoDecoder(
+ const cdm::VideoDecoderConfig& /* video_decoder_config */) {
+ return cdm::kDecryptError;
+}
+
+void StubCdm::ResetDecoder(cdm::StreamType /* decoder_type */) {
+}
+
+void StubCdm::DeinitializeDecoder(cdm::StreamType /* decoder_type */) {
+}
+
+cdm::Status StubCdm::DecryptAndDecodeFrame(
+ const cdm::InputBuffer& /* encrypted_buffer */,
+ cdm::VideoFrame* /* decoded_frame */) {
+ return cdm::kDecryptError;
+}
+
+cdm::Status StubCdm::DecryptAndDecodeSamples(
+ const cdm::InputBuffer& /* encrypted_buffer */,
+ cdm::AudioFrames* /* audio_frames */) {
+ return cdm::kDecryptError;
+}
+
+void StubCdm::Destroy() {
+ delete this;
+}
+
+void StubCdm::OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& /* response */) {
+ NOTREACHED();
+}
+
+void StubCdm::OnQueryOutputProtectionStatus(
+ cdm::QueryResult /* result */,
+ uint32_t /* link_mask */,
+ uint32_t /* output_protection_mask */) {
+ NOTREACHED();
+};
+
+void StubCdm::FailRequest(uint32 promise_id) {
+ std::string message("Operation not supported by stub CDM.");
+ host_->OnRejectPromise(promise_id, cdm::kInvalidAccessError, 0,
+ message.data(), message.length());
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/stub/stub_cdm.h b/chromium/media/cdm/stub/stub_cdm.h
new file mode 100644
index 00000000000..fc4d0403c88
--- /dev/null
+++ b/chromium/media/cdm/stub/stub_cdm.h
@@ -0,0 +1,80 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_STUB_STUB_CDM_H_
+#define MEDIA_CDM_STUB_STUB_CDM_H_
+
+#include "base/basictypes.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+
+namespace media {
+
+typedef cdm::ContentDecryptionModule_8 StubCdmInterface;
+
+// Dummy implementation of the cdm::ContentDecryptionModule interface.
+class StubCdm : public StubCdmInterface {
+ public:
+ explicit StubCdm(Host* host);
+ ~StubCdm() override;
+
+ // StubCdmInterface implementation.
+ void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) override;
+ void CreateSessionAndGenerateRequest(uint32 promise_id,
+ cdm::SessionType session_type,
+ cdm::InitDataType init_data_type,
+ const uint8* init_data,
+ uint32 init_data_size) override;
+ void LoadSession(uint32 promise_id,
+ cdm::SessionType session_type,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void UpdateSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length,
+ const uint8* response,
+ uint32 response_size) override;
+ void CloseSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void RemoveSession(uint32 promise_id,
+ const char* session_id,
+ uint32_t session_id_length) override;
+ void SetServerCertificate(uint32 promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) override;
+ void TimerExpired(void* context) override;
+ cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
+ cdm::DecryptedBlock* decrypted_block) override;
+ cdm::Status InitializeAudioDecoder(
+ const cdm::AudioDecoderConfig& audio_decoder_config) override;
+ cdm::Status InitializeVideoDecoder(
+ const cdm::VideoDecoderConfig& video_decoder_config) override;
+ void DeinitializeDecoder(cdm::StreamType decoder_type) override;
+ void ResetDecoder(cdm::StreamType decoder_type) override;
+ cdm::Status DecryptAndDecodeFrame(const cdm::InputBuffer& encrypted_buffer,
+ cdm::VideoFrame* video_frame) override;
+ cdm::Status DecryptAndDecodeSamples(const cdm::InputBuffer& encrypted_buffer,
+ cdm::AudioFrames* audio_frames) override;
+ void Destroy() override;
+ void OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) override;
+ void OnQueryOutputProtectionStatus(cdm::QueryResult result,
+ uint32_t link_mask,
+ uint32_t output_protection_mask) override;
+
+ private:
+ // Helper function that rejects the promise specified by |promise_id|.
+ void FailRequest(uint32 promise_id);
+
+ Host* host_;
+
+ uint32 next_session_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(StubCdm);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_STUB_STUB_CDM_H_
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 3b92d3db706..3b212ed79ad 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -10,7 +10,6 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/video_frame.h"
#include "media/base/video_util.h"
namespace media {
@@ -18,8 +17,8 @@ namespace media {
// Why FF_INPUT_BUFFER_PADDING_SIZE? FFmpeg assumes all input buffers are
// padded. Check here to ensure FFmpeg only receives data padded to its
// specifications.
-COMPILE_ASSERT(DecoderBuffer::kPaddingSize >= FF_INPUT_BUFFER_PADDING_SIZE,
- decoder_buffer_padding_size_does_not_fit_ffmpeg_requirement);
+static_assert(DecoderBuffer::kPaddingSize >= FF_INPUT_BUFFER_PADDING_SIZE,
+ "DecoderBuffer padding size does not fit ffmpeg requirement");
// Alignment requirement by FFmpeg for input and output buffers. This need to
// be updated to match FFmpeg when it changes.
@@ -30,22 +29,22 @@ static const int kFFmpegBufferAddressAlignment = 32;
#endif
// Check here to ensure FFmpeg only receives data aligned to its specifications.
-COMPILE_ASSERT(
+static_assert(
DecoderBuffer::kAlignmentSize >= kFFmpegBufferAddressAlignment &&
DecoderBuffer::kAlignmentSize % kFFmpegBufferAddressAlignment == 0,
- decoder_buffer_alignment_size_does_not_fit_ffmpeg_requirement);
+ "DecoderBuffer alignment size does not fit ffmpeg requirement");
// Allows faster SIMD YUV convert. Also, FFmpeg overreads/-writes occasionally.
// See video_get_buffer() in libavcodec/utils.c.
static const int kFFmpegOutputBufferPaddingSize = 16;
-COMPILE_ASSERT(VideoFrame::kFrameSizePadding >= kFFmpegOutputBufferPaddingSize,
- video_frame_padding_size_does_not_fit_ffmpeg_requirement);
+static_assert(VideoFrame::kFrameSizePadding >= kFFmpegOutputBufferPaddingSize,
+ "VideoFrame padding size does not fit ffmpeg requirement");
-COMPILE_ASSERT(
+static_assert(
VideoFrame::kFrameAddressAlignment >= kFFmpegBufferAddressAlignment &&
VideoFrame::kFrameAddressAlignment % kFFmpegBufferAddressAlignment == 0,
- video_frame_address_alignment_does_not_fit_ffmpeg_requirement);
+ "VideoFrame frame address alignment does not fit ffmpeg requirement");
static const AVRational kMicrosBase = { 1, base::Time::kMicrosecondsPerSecond };
@@ -92,6 +91,8 @@ static AudioCodec CodecIDToAudioCodec(AVCodecID codec_id) {
return kCodecPCM_MULAW;
case AV_CODEC_ID_OPUS:
return kCodecOpus;
+ case AV_CODEC_ID_ALAC:
+ return kCodecALAC;
default:
DVLOG(1) << "Unknown audio CodecID: " << codec_id;
}
@@ -103,6 +104,8 @@ static AVCodecID AudioCodecToCodecID(AudioCodec audio_codec,
switch (audio_codec) {
case kCodecAAC:
return AV_CODEC_ID_AAC;
+ case kCodecALAC:
+ return AV_CODEC_ID_ALAC;
case kCodecMP3:
return AV_CODEC_ID_MP3;
case kCodecPCM:
@@ -242,6 +245,8 @@ SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format) {
return kSampleFormatF32;
case AV_SAMPLE_FMT_S16P:
return kSampleFormatPlanarS16;
+ case AV_SAMPLE_FMT_S32P:
+ return kSampleFormatPlanarS32;
case AV_SAMPLE_FMT_FLTP:
return kSampleFormatPlanarF32;
default:
@@ -407,6 +412,12 @@ void AVStreamToVideoDecoderConfig(
coded_size = visible_rect.size();
}
+ // YV12 frames may be in HD color space.
+ if (format == VideoFrame::YV12 &&
+ stream->codec->colorspace == AVCOL_SPC_BT709) {
+ format = VideoFrame::YV12HD;
+ }
+
// Pad out |coded_size| for subsampled YUV formats.
if (format != VideoFrame::YV24) {
coded_size.set_width((coded_size.width() + 1) / 2 * 2);
@@ -546,6 +557,7 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
case VideoFrame::YV16:
return PIX_FMT_YUV422P;
case VideoFrame::YV12:
+ case VideoFrame::YV12HD:
return PIX_FMT_YUV420P;
case VideoFrame::YV12J:
return PIX_FMT_YUVJ420P;
diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
index 3616a150aab..a73fddd3716 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.h
+++ b/chromium/media/ffmpeg/ffmpeg_common.h
@@ -32,6 +32,7 @@ extern "C" {
MSVC_PUSH_DISABLE_WARNING(4244);
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
+#include <libavformat/internal.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
diff --git a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
index 0d9bdc9980f..ff0730732f9 100644
--- a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -88,6 +89,17 @@ TEST_F(FFmpegCommonTest, UTCDateToTime_Valid) {
EXPECT_EQ(0, exploded.millisecond);
}
+#if defined(ALLOCATOR_SHIM) && defined(GTEST_HAS_DEATH_TEST)
+TEST_F(FFmpegCommonTest, WinAllocatorShimDeathTest) {
+ scoped_ptr<char, base::FreeDeleter> ptr;
+ // INT_MAX - 128 is carefully chosen to be below the default limit for
+ // ffmpeg allocations, but above the maximum allowed limit by the allocator
+ // shim, so we can be certain the code is being hit.
+ EXPECT_DEATH(ptr.reset(static_cast<char*>(av_malloc(INT_MAX - 128))), "");
+ ASSERT_TRUE(!ptr);
+}
+#endif
+
TEST_F(FFmpegCommonTest, UTCDateToTime_Invalid) {
const char* invalid_date_strings[] = {
"",
diff --git a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
index ebb2b9a628c..91fde9105ea 100644
--- a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
+++ b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
@@ -18,51 +18,31 @@
//
// Test cases labeled FLAKY may not always pass, but they should never crash or
// cause any kind of warnings or errors under tooling.
-//
-// Frame hashes must be generated with --video-threads=1 for correctness.
-//
-// Known issues:
-// Cr47325 will generate an UninitValue error under Valgrind inside of the
-// MD5 hashing code. The error occurs due to some problematic error
-// resilence code for H264 inside of FFmpeg. See http://crbug.com/119020
-//
-// Some OGG files leak ~30 bytes of memory, upstream tracking bug:
-// https://ffmpeg.org/trac/ffmpeg/ticket/1244
-//
-// Some OGG files leak hundreds of kilobytes of memory, upstream bug:
-// https://ffmpeg.org/trac/ffmpeg/ticket/1931
-
-#include "media/filters/pipeline_integration_test_base.h"
#include <string>
#include "base/bind.h"
-#include "media/base/test_data_util.h"
+#include "media/test/pipeline_integration_test_base.h"
namespace media {
const char kRegressionTestDataPathPrefix[] = "internal/";
struct RegressionTestData {
- RegressionTestData(const char* filename, PipelineStatus init_status,
- PipelineStatus end_status, const char* video_md5,
- const char* audio_md5)
- : video_md5(video_md5),
- audio_md5(audio_md5),
- filename(std::string(kRegressionTestDataPathPrefix) + filename),
+ RegressionTestData(const char* filename,
+ PipelineStatus init_status,
+ PipelineStatus end_status)
+ : filename(std::string(kRegressionTestDataPathPrefix) + filename),
init_status(init_status),
- end_status(end_status) {
- }
+ end_status(end_status) {}
- const char* video_md5;
- const char* audio_md5;
std::string filename;
PipelineStatus init_status;
PipelineStatus end_status;
};
// Used for tests which just need to run without crashing or tooling errors, but
-// which may have undefined behavior for hashing, etc.
+// which may have undefined PipelineStatus results.
struct FlakyRegressionTestData {
FlakyRegressionTestData(const char* filename)
: filename(std::string(kRegressionTestDataPathPrefix) + filename) {
@@ -81,243 +61,248 @@ class FlakyFFmpegRegressionTest
public PipelineIntegrationTestBase {
};
-#define FFMPEG_TEST_CASE(name, fn, init_status, end_status, video_md5, \
- audio_md5) \
- INSTANTIATE_TEST_CASE_P(name, FFmpegRegressionTest, \
- testing::Values(RegressionTestData(fn, \
- init_status, \
- end_status, \
- video_md5, \
- audio_md5)));
+#define FFMPEG_TEST_CASE(name, fn, init_status, end_status) \
+ INSTANTIATE_TEST_CASE_P( \
+ name, FFmpegRegressionTest, \
+ testing::Values(RegressionTestData(fn, init_status, end_status)));
#define FLAKY_FFMPEG_TEST_CASE(name, fn) \
INSTANTIATE_TEST_CASE_P(FLAKY_##name, FlakyFFmpegRegressionTest, \
testing::Values(FlakyRegressionTestData(fn)));
// Test cases from issues.
-FFMPEG_TEST_CASE(Cr47325, "security/47325.mp4", PIPELINE_OK, PIPELINE_OK,
- "2a7a938c6b5979621cec998f02d9bbb6",
- "3.61,1.64,-3.24,0.12,1.50,-0.86,");
-FFMPEG_TEST_CASE(Cr47761, "crbug47761.ogg", PIPELINE_OK, PIPELINE_OK,
- kNullVideoHash,
- "8.89,8.55,8.88,8.01,8.23,7.69,");
-FFMPEG_TEST_CASE(Cr50045, "crbug50045.mp4", PIPELINE_OK, PIPELINE_OK,
- "c345e9ef9ebfc6bfbcbe3f0ddc3125ba",
- "2.72,-6.27,-6.11,-3.17,-5.58,1.26,");
-FFMPEG_TEST_CASE(Cr62127, "crbug62127.webm", PIPELINE_OK,
- PIPELINE_OK, "a064b2776fc5aef3e9cba47967a75db9",
- kNullAudioHash);
-FFMPEG_TEST_CASE(Cr93620, "security/93620.ogg", PIPELINE_OK, PIPELINE_OK,
- kNullVideoHash,
- "-10.55,-10.10,-10.42,-10.35,-10.29,-10.72,");
-FFMPEG_TEST_CASE(Cr100492, "security/100492.webm", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr100543, "security/100543.webm", PIPELINE_OK, PIPELINE_OK,
- "c16691cc9178db3adbf7e562cadcd6e6",
- "1211.73,304.89,1311.54,371.34,1283.06,299.63,");
-FFMPEG_TEST_CASE(Cr101458, "security/101458.webm", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr108416, "security/108416.webm", PIPELINE_OK, PIPELINE_OK,
- "5cb3a934795cd552753dec7687928291",
- "-17.87,-37.20,-23.33,45.57,8.13,-9.92,");
-FFMPEG_TEST_CASE(Cr110849, "security/110849.mkv",
+FFMPEG_TEST_CASE(Cr47325, "security/47325.mp4", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr47761, "crbug47761.ogg", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr50045, "crbug50045.mp4", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr62127, "crbug62127.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr93620, "security/93620.ogg", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr100492,
+ "security/100492.webm",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(Cr100543, "security/100543.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr101458, "security/101458.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr108416, "security/108416.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr110849,
+ "security/110849.mkv",
DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
+FFMPEG_TEST_CASE(Cr112384,
+ "security/112384.webm",
+ DEMUXER_ERROR_COULD_NOT_PARSE,
+ DEMUXER_ERROR_COULD_NOT_PARSE);
+FFMPEG_TEST_CASE(Cr112976, "security/112976.ogg", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr116927,
+ "security/116927.ogv",
DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr112384, "security/112384.webm",
- DEMUXER_ERROR_COULD_NOT_PARSE, DEMUXER_ERROR_COULD_NOT_PARSE,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr117912, "security/117912.webm", DEMUXER_ERROR_COULD_NOT_OPEN,
- DEMUXER_ERROR_COULD_NOT_OPEN, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr123481, "security/123481.ogv", PIPELINE_OK,
- PIPELINE_OK, "e6dd853fcbd746c8bb2ab2b8fc376fc7",
- "1.28,-0.32,-0.81,0.08,1.66,0.89,");
-FFMPEG_TEST_CASE(Cr132779, "security/132779.webm",
- DEMUXER_ERROR_COULD_NOT_PARSE, DEMUXER_ERROR_COULD_NOT_PARSE,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr140165, "security/140165.ogg", PIPELINE_ERROR_DECODE,
- PIPELINE_ERROR_DECODE, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr140647, "security/140647.ogv", DEMUXER_ERROR_COULD_NOT_OPEN,
- DEMUXER_ERROR_COULD_NOT_OPEN, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr142738, "crbug142738.ogg", PIPELINE_OK, PIPELINE_OK,
- kNullVideoHash,
- "-1.22,0.45,1.79,1.80,-0.30,-1.21,");
-FFMPEG_TEST_CASE(Cr152691, "security/152691.mp3", PIPELINE_ERROR_DECODE,
- PIPELINE_ERROR_DECODE, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr161639, "security/161639.m4a", PIPELINE_ERROR_DECODE,
- PIPELINE_ERROR_DECODE, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr222754, "security/222754.mp4", PIPELINE_ERROR_DECODE,
- PIPELINE_ERROR_DECODE, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr234630a, "security/234630a.mov", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-15.52,-18.90,-15.33,-16.68,-14.41,-15.89,");
-FFMPEG_TEST_CASE(Cr234630b, "security/234630b.mov", PIPELINE_ERROR_DECODE,
- PIPELINE_ERROR_DECODE, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(Cr242786, "security/242786.webm", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-1.72,-0.83,0.84,1.70,1.23,-0.53,");
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
+FFMPEG_TEST_CASE(Cr117912,
+ "security/117912.webm",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr123481, "security/123481.ogv", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr132779,
+ "security/132779.webm",
+ DEMUXER_ERROR_COULD_NOT_PARSE,
+ DEMUXER_ERROR_COULD_NOT_PARSE);
+FFMPEG_TEST_CASE(Cr140165, "security/140165.ogg", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr140647,
+ "security/140647.ogv",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr142738, "crbug142738.ogg", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr152691,
+ "security/152691.mp3",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(Cr161639,
+ "security/161639.m4a",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(Cr222754,
+ "security/222754.mp4",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(Cr234630a, "security/234630a.mov", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr234630b,
+ "security/234630b.mov",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(Cr242786, "security/242786.webm", PIPELINE_OK, PIPELINE_OK);
// Test for out-of-bounds access with slightly corrupt file (detection logic
// thinks it's a MONO file, but actually contains STEREO audio).
-FFMPEG_TEST_CASE(Cr275590, "security/275590.m4a",
- DECODER_ERROR_NOT_SUPPORTED, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
+FFMPEG_TEST_CASE(Cr275590,
+ "security/275590.m4a",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr444522, "security/444522.mp4", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr444539,
+ "security/444539.m4a",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr444546,
+ "security/444546.mp4",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr449958,
+ "security/449958.webm",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
// General MP4 test cases.
-FFMPEG_TEST_CASE(MP4_0, "security/aac.10419.mp4", DEMUXER_ERROR_COULD_NOT_OPEN,
- DEMUXER_ERROR_COULD_NOT_OPEN, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_1, "security/clockh264aac_200021889.mp4",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_2, "security/clockh264aac_200701257.mp4", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_5, "security/clockh264aac_3022500.mp4",
- DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
- DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_6, "security/clockh264aac_344289.mp4", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_7, "security/clockh264mp3_187697.mp4",
- DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
+FFMPEG_TEST_CASE(MP4_0,
+ "security/aac.10419.mp4",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(MP4_1,
+ "security/clockh264aac_200021889.mp4",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(MP4_2,
+ "security/clockh264aac_200701257.mp4",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(MP4_5,
+ "security/clockh264aac_3022500.mp4",
DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_8, "security/h264.705767.mp4",
- DEMUXER_ERROR_COULD_NOT_PARSE, DEMUXER_ERROR_COULD_NOT_PARSE,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_9, "security/smclockmp4aac_1_0.mp4",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(MP4_16, "security/looping2.mov",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
+FFMPEG_TEST_CASE(MP4_6,
+ "security/clockh264aac_344289.mp4",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(MP4_7,
+ "security/clockh264mp3_187697.mp4",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(MP4_8,
+ "security/h264.705767.mp4",
+ DEMUXER_ERROR_COULD_NOT_PARSE,
+ DEMUXER_ERROR_COULD_NOT_PARSE);
+FFMPEG_TEST_CASE(MP4_9,
+ "security/smclockmp4aac_1_0.mp4",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(MP4_11, "security/null1.mp4", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(MP4_16,
+ "security/looping2.mov",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(MP4_17, "security/assert2.mov", PIPELINE_OK, PIPELINE_OK);
// General OGV test cases.
-FFMPEG_TEST_CASE(OGV_1, "security/out.163.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_2, "security/out.391.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_5, "security/smclocktheora_1_0.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_7, "security/smclocktheora_1_102.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_8, "security/smclocktheora_1_104.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_9, "security/smclocktheora_1_110.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_10, "security/smclocktheora_1_179.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_11, "security/smclocktheora_1_20.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_12, "security/smclocktheora_1_723.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_14, "security/smclocktheora_2_10405.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_15, "security/smclocktheora_2_10619.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_16, "security/smclocktheora_2_1075.ogv",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_18, "security/wav.711.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_19, "security/null1.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_20, "security/null2.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_21, "security/assert1.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(OGV_22, "security/assert2.ogv", DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED, kNullVideoHash, kNullAudioHash);
+FFMPEG_TEST_CASE(OGV_1,
+ "security/out.163.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_2,
+ "security/out.391.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_5,
+ "security/smclocktheora_1_0.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_7,
+ "security/smclocktheora_1_102.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_8,
+ "security/smclocktheora_1_104.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_9,
+ "security/smclocktheora_1_110.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_10,
+ "security/smclocktheora_1_179.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_11,
+ "security/smclocktheora_1_20.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_12,
+ "security/smclocktheora_1_723.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_14,
+ "security/smclocktheora_2_10405.ogv",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(OGV_15,
+ "security/smclocktheora_2_10619.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_16,
+ "security/smclocktheora_2_1075.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_17,
+ "security/vorbis.482086.ogv",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(OGV_18,
+ "security/wav.711.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_19,
+ "security/null1.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_20,
+ "security/null2.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_21,
+ "security/assert1.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_22,
+ "security/assert2.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(OGV_23,
+ "security/assert2.ogv",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
// General WebM test cases.
-FFMPEG_TEST_CASE(WEBM_1, "security/no-bug.webm", PIPELINE_OK, PIPELINE_OK,
- "39e92700cbb77478fd63f49db855e7e5", kNullAudioHash);
-FFMPEG_TEST_CASE(WEBM_3, "security/out.webm.139771.2965",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(WEBM_4, "security/out.webm.68798.1929",
- DECODER_ERROR_NOT_SUPPORTED, DECODER_ERROR_NOT_SUPPORTED,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(WEBM_5, "frame_size_change.webm", PIPELINE_OK,
- PIPELINE_OK, "d8fcf2896b7400a2261bac9e9ea930f8",
- kNullAudioHash);
+FFMPEG_TEST_CASE(WEBM_0, "security/memcpy.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(WEBM_1, "security/no-bug.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(WEBM_2,
+ "security/uninitialize.webm",
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
+FFMPEG_TEST_CASE(WEBM_4,
+ "security/out.webm.68798.1929",
+ DECODER_ERROR_NOT_SUPPORTED,
+ DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(WEBM_5, "frame_size_change.webm", PIPELINE_OK, PIPELINE_OK);
-// Audio Functional Tests
-FFMPEG_TEST_CASE(AUDIO_GAMING_0, "gaming/a_220_00.mp3", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "0.36,1.25,2.98,4.29,4.19,2.76,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_1, "gaming/a_220_00_v2.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "2.17,3.31,5.15,6.33,5.97,4.35,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_2, "gaming/ai_laser1.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "7.70,10.81,13.19,10.07,7.39,7.56,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_3, "gaming/ai_laser2.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "5.99,8.04,9.71,8.69,7.81,7.52,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_4, "gaming/ai_laser3.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-0.32,1.44,3.75,5.88,6.32,3.22,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_5, "gaming/ai_laser4.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "4.75,4.16,2.21,3.01,5.51,6.11,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_6, "gaming/ai_laser5.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "6.04,7.46,8.78,7.32,4.16,3.97,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_7, "gaming/footstep1.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-0.50,0.29,2.35,4.79,5.14,2.24,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_8, "gaming/footstep3.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-2.87,-3.05,-4.10,-3.20,-2.20,-2.20,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_9, "gaming/footstep4.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "10.35,10.74,11.60,12.83,12.69,10.67,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_10, "gaming/laser1.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-9.48,-12.94,-1.75,7.66,5.61,-0.58,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_11, "gaming/laser2.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-7.53,-6.28,3.37,0.73,-5.83,-4.70,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_12, "gaming/laser3.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-13.62,-6.55,2.52,-10.10,-10.68,-5.43,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_13, "gaming/leg1.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "5.62,5.79,5.81,5.60,6.18,6.15,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_14, "gaming/leg2.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "-0.88,1.32,2.74,3.07,0.88,-0.03,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_15, "gaming/leg3.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "17.77,18.59,19.57,18.84,17.62,17.22,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_16, "gaming/lock_on.ogg", PIPELINE_OK,
- PIPELINE_OK, kNullVideoHash,
- "3.08,-4.33,-5.04,-0.24,1.83,5.16,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_17, "gaming/enemy_lock_on.ogg",
- PIPELINE_OK, PIPELINE_OK, kNullVideoHash,
- "-2.24,-1.00,-2.75,-0.87,1.11,-0.58,");
-FFMPEG_TEST_CASE(AUDIO_GAMING_18, "gaming/rocket_launcher.mp3",
- PIPELINE_OK, PIPELINE_OK, kNullVideoHash,
- "-3.08,0.18,2.49,1.98,-2.20,-4.74,");
+// General MKV test cases.
+FFMPEG_TEST_CASE(MKV_0,
+ "security/nested_tags_lang.mka.627.628",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(MKV_1,
+ "security/nested_tags_lang.mka.667.628",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
// Allocate gigabytes of memory, likely can't be run on 32bit machines.
-FFMPEG_TEST_CASE(BIG_MEM_1, "security/bigmem1.mov",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(BIG_MEM_2, "security/looping1.mov",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
-FFMPEG_TEST_CASE(BIG_MEM_5, "security/looping5.mov",
- DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_OPEN,
- kNullVideoHash, kNullAudioHash);
+FFMPEG_TEST_CASE(BIG_MEM_1,
+ "security/bigmem1.mov",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(BIG_MEM_2,
+ "security/looping1.mov",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(BIG_MEM_5,
+ "security/looping5.mov",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
FLAKY_FFMPEG_TEST_CASE(BIG_MEM_3, "security/looping3.mov");
FLAKY_FFMPEG_TEST_CASE(BIG_MEM_4, "security/looping4.mov");
@@ -336,44 +321,17 @@ FLAKY_FFMPEG_TEST_CASE(OGV_13, "security/smclocktheora_1_790.ogv");
FLAKY_FFMPEG_TEST_CASE(MP4_3, "security/clockh264aac_300413969.mp4");
FLAKY_FFMPEG_TEST_CASE(MP4_4, "security/clockh264aac_301350139.mp4");
FLAKY_FFMPEG_TEST_CASE(MP4_12, "security/assert1.mov");
+FLAKY_FFMPEG_TEST_CASE(WEBM_3, "security/out.webm.139771.2965");
+
// Not really flaky, but can't pass the seek test.
FLAKY_FFMPEG_TEST_CASE(MP4_10, "security/null1.m4a");
-
-// TODO(wolenetz/dalecurtis): The following have flaky audio hash result.
-// See http://crbug.com/237371
-FLAKY_FFMPEG_TEST_CASE(Cr112976, "security/112976.ogg");
-FLAKY_FFMPEG_TEST_CASE(MKV_0, "security/nested_tags_lang.mka.627.628");
-FLAKY_FFMPEG_TEST_CASE(MKV_1, "security/nested_tags_lang.mka.667.628");
-FLAKY_FFMPEG_TEST_CASE(MP4_11, "security/null1.mp4");
-
-// TODO(wolenetz/dalecurtis): The following have flaky init status: on mac
-// ia32 Chrome, observed PIPELINE_OK instead of DECODER_ERROR_NOT_SUPPORTED.
FLAKY_FFMPEG_TEST_CASE(Cr112670, "security/112670.mp4");
-FLAKY_FFMPEG_TEST_CASE(OGV_17, "security/vorbis.482086.ogv");
-
-// TODO(wolenetz/dalecurtis): The following have flaky init status: on mac
-// ia32 Chrome, observed DUMUXER_ERROR_NO_SUPPORTED_STREAMS instead of
-// DECODER_ERROR_NOT_SUPPORTED.
-FLAKY_FFMPEG_TEST_CASE(Cr116927, "security/116927.ogv");
-FLAKY_FFMPEG_TEST_CASE(WEBM_2, "security/uninitialize.webm");
-
-// Videos with massive gaps between frame timestamps that result in long hangs
-// with our pipeline. Should be uncommented when we support clockless playback.
-// FFMPEG_TEST_CASE(WEBM_0, "security/memcpy.webm", PIPELINE_OK, PIPELINE_OK,
-// kNullVideoHash, kNullAudioHash);
-// FFMPEG_TEST_CASE(MP4_17, "security/assert2.mov", PIPELINE_OK, PIPELINE_OK,
-// kNullVideoHash, kNullAudioHash);
-// FFMPEG_TEST_CASE(OGV_23, "security/assert2.ogv", PIPELINE_OK, PIPELINE_OK,
-// kNullVideoHash, kNullAudioHash);
TEST_P(FFmpegRegressionTest, BasicPlayback) {
if (GetParam().init_status == PIPELINE_OK) {
- ASSERT_TRUE(Start(GetTestDataFilePath(GetParam().filename),
- GetParam().init_status, kHashed));
+ ASSERT_EQ(PIPELINE_OK, Start(GetParam().filename, kClockless));
Play();
ASSERT_EQ(WaitUntilEndedOrError(), GetParam().end_status);
- EXPECT_EQ(GetParam().video_md5, GetVideoHash());
- EXPECT_EQ(GetParam().audio_md5, GetAudioHash());
// Check for ended if the pipeline is expected to finish okay.
if (GetParam().end_status == PIPELINE_OK) {
@@ -383,15 +341,14 @@ TEST_P(FFmpegRegressionTest, BasicPlayback) {
Seek(base::TimeDelta::FromMilliseconds(0));
}
} else {
- ASSERT_FALSE(Start(GetTestDataFilePath(GetParam().filename),
- GetParam().init_status, kHashed));
- EXPECT_EQ(GetParam().video_md5, GetVideoHash());
- EXPECT_EQ(GetParam().audio_md5, GetAudioHash());
+ // Don't bother checking the exact status as we only care that the
+ // pipeline failed to start.
+ EXPECT_NE(PIPELINE_OK, Start(GetParam().filename));
}
}
TEST_P(FlakyFFmpegRegressionTest, BasicPlayback) {
- if (Start(GetTestDataFilePath(GetParam().filename))) {
+ if (Start(GetParam().filename, kClockless) == PIPELINE_OK) {
Play();
WaitUntilEndedOrError();
}
diff --git a/chromium/media/ffmpeg/ffmpeg_unittest.cc b/chromium/media/ffmpeg/ffmpeg_unittest.cc
deleted file mode 100644
index 0bb7fa7ca4f..00000000000
--- a/chromium/media/ffmpeg/ffmpeg_unittest.cc
+++ /dev/null
@@ -1,589 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// ffmpeg_unittests verify that the parts of the FFmpeg API that Chromium uses
-// function as advertised for each media format that Chromium supports. This
-// mostly includes stuff like reporting proper timestamps, seeking to
-// keyframes, and supporting certain features like reordered_opaque.
-//
-
-#include <limits>
-#include <queue>
-
-#include "base/base_paths.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/path_service.h"
-#include "base/strings/string_util.h"
-#include "base/test/perf_test_suite.h"
-#include "base/test/perf_time_logger.h"
-#include "media/base/media.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/filters/ffmpeg_glue.h"
-#include "media/filters/in_memory_url_protocol.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-int main(int argc, char** argv) {
- return base::PerfTestSuite(argc, argv).Run();
-}
-
-namespace media {
-
-// Mirror setting in ffmpeg_video_decoder.
-static const int kDecodeThreads = 2;
-
-class AVPacketQueue {
- public:
- AVPacketQueue() {
- }
-
- ~AVPacketQueue() {
- flush();
- }
-
- bool empty() {
- return packets_.empty();
- }
-
- AVPacket* peek() {
- return packets_.front();
- }
-
- void pop() {
- AVPacket* packet = packets_.front();
- packets_.pop();
- av_free_packet(packet);
- delete packet;
- }
-
- void push(AVPacket* packet) {
- av_dup_packet(packet);
- packets_.push(packet);
- }
-
- void flush() {
- while (!empty()) {
- pop();
- }
- }
-
- private:
- std::queue<AVPacket*> packets_;
-
- DISALLOW_COPY_AND_ASSIGN(AVPacketQueue);
-};
-
-// TODO(dalecurtis): We should really just use PipelineIntegrationTests instead
-// of a one-off step decoder so we're exercising the real pipeline.
-class FFmpegTest : public testing::TestWithParam<const char*> {
- protected:
- FFmpegTest()
- : av_format_context_(NULL),
- audio_stream_index_(-1),
- video_stream_index_(-1),
- decoded_audio_time_(AV_NOPTS_VALUE),
- decoded_audio_duration_(AV_NOPTS_VALUE),
- decoded_video_time_(AV_NOPTS_VALUE),
- decoded_video_duration_(AV_NOPTS_VALUE),
- duration_(AV_NOPTS_VALUE) {
- InitializeFFmpeg();
-
- audio_buffer_.reset(av_frame_alloc());
- video_buffer_.reset(av_frame_alloc());
- }
-
- virtual ~FFmpegTest() {
- }
-
- void OpenAndReadFile(const std::string& name) {
- OpenFile(name);
- OpenCodecs();
- ReadRemainingFile();
- }
-
- void OpenFile(const std::string& name) {
- base::FilePath path;
- PathService::Get(base::DIR_SOURCE_ROOT, &path);
- path = path.AppendASCII("media")
- .AppendASCII("test")
- .AppendASCII("data")
- .AppendASCII("content")
- .AppendASCII(name.c_str());
- EXPECT_TRUE(base::PathExists(path));
-
- CHECK(file_data_.Initialize(path));
- protocol_.reset(new InMemoryUrlProtocol(
- file_data_.data(), file_data_.length(), false));
- glue_.reset(new FFmpegGlue(protocol_.get()));
-
- ASSERT_TRUE(glue_->OpenContext()) << "Could not open " << path.value();
- av_format_context_ = glue_->format_context();
- ASSERT_LE(0, avformat_find_stream_info(av_format_context_, NULL))
- << "Could not find stream information for " << path.value();
-
- // Determine duration by picking max stream duration.
- for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
- AVStream* av_stream = av_format_context_->streams[i];
- int64 duration = ConvertFromTimeBase(
- av_stream->time_base, av_stream->duration).InMicroseconds();
- duration_ = std::max(duration_, duration);
- }
-
- // Final check to see if the container itself specifies a duration.
- AVRational av_time_base = {1, AV_TIME_BASE};
- int64 duration =
- ConvertFromTimeBase(av_time_base,
- av_format_context_->duration).InMicroseconds();
- duration_ = std::max(duration_, duration);
- }
-
- void OpenCodecs() {
- for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
- AVStream* av_stream = av_format_context_->streams[i];
- AVCodecContext* av_codec_context = av_stream->codec;
- AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);
-
- EXPECT_TRUE(av_codec)
- << "Could not find AVCodec with CodecID "
- << av_codec_context->codec_id;
-
- av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
- av_codec_context->thread_count = kDecodeThreads;
-
- EXPECT_EQ(0, avcodec_open2(av_codec_context, av_codec, NULL))
- << "Could not open AVCodecContext with CodecID "
- << av_codec_context->codec_id;
-
- if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
- EXPECT_EQ(-1, audio_stream_index_) << "Found multiple audio streams.";
- audio_stream_index_ = static_cast<int>(i);
- } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
- EXPECT_EQ(-1, video_stream_index_) << "Found multiple video streams.";
- video_stream_index_ = static_cast<int>(i);
- } else {
- ADD_FAILURE() << "Found unknown stream type.";
- }
- }
- }
-
- void Flush() {
- if (has_audio()) {
- audio_packets_.flush();
- avcodec_flush_buffers(av_audio_context());
- }
- if (has_video()) {
- video_packets_.flush();
- avcodec_flush_buffers(av_video_context());
- }
- }
-
- void ReadUntil(int64 time) {
- while (true) {
- scoped_ptr<AVPacket> packet(new AVPacket());
- if (av_read_frame(av_format_context_, packet.get()) < 0) {
- break;
- }
-
- int stream_index = static_cast<int>(packet->stream_index);
- int64 packet_time = AV_NOPTS_VALUE;
- if (stream_index == audio_stream_index_) {
- packet_time =
- ConvertFromTimeBase(av_audio_stream()->time_base, packet->pts)
- .InMicroseconds();
- audio_packets_.push(packet.release());
- } else if (stream_index == video_stream_index_) {
- packet_time =
- ConvertFromTimeBase(av_video_stream()->time_base, packet->pts)
- .InMicroseconds();
- video_packets_.push(packet.release());
- } else {
- ADD_FAILURE() << "Found packet that belongs to unknown stream.";
- }
-
- if (packet_time > time) {
- break;
- }
- }
- }
-
- void ReadRemainingFile() {
- ReadUntil(std::numeric_limits<int64>::max());
- }
-
- bool StepDecodeAudio() {
- EXPECT_TRUE(has_audio());
- if (!has_audio() || audio_packets_.empty()) {
- return false;
- }
-
- // Decode until output is produced, end of stream, or error.
- while (true) {
- int result = 0;
- int got_audio = 0;
- bool end_of_stream = false;
-
- AVPacket packet;
- if (audio_packets_.empty()) {
- av_init_packet(&packet);
- end_of_stream = true;
- } else {
- memcpy(&packet, audio_packets_.peek(), sizeof(packet));
- }
-
- av_frame_unref(audio_buffer_.get());
- result = avcodec_decode_audio4(av_audio_context(), audio_buffer_.get(),
- &got_audio, &packet);
- if (!audio_packets_.empty()) {
- audio_packets_.pop();
- }
-
- EXPECT_GE(result, 0) << "Audio decode error.";
- if (result < 0 || (got_audio == 0 && end_of_stream)) {
- return false;
- }
-
- if (result > 0) {
- double microseconds = 1.0L * audio_buffer_->nb_samples /
- av_audio_context()->sample_rate *
- base::Time::kMicrosecondsPerSecond;
- decoded_audio_duration_ = static_cast<int64>(microseconds);
-
- if (packet.pts == static_cast<int64>(AV_NOPTS_VALUE)) {
- EXPECT_NE(decoded_audio_time_, static_cast<int64>(AV_NOPTS_VALUE))
- << "We never received an initial timestamped audio packet! "
- << "Looks like there's a seeking/parsing bug in FFmpeg.";
- decoded_audio_time_ += decoded_audio_duration_;
- } else {
- decoded_audio_time_ =
- ConvertFromTimeBase(av_audio_stream()->time_base, packet.pts)
- .InMicroseconds();
- }
- return true;
- }
- }
- return true;
- }
-
- bool StepDecodeVideo() {
- EXPECT_TRUE(has_video());
- if (!has_video() || video_packets_.empty()) {
- return false;
- }
-
- // Decode until output is produced, end of stream, or error.
- while (true) {
- int result = 0;
- int got_picture = 0;
- bool end_of_stream = false;
-
- AVPacket packet;
- if (video_packets_.empty()) {
- av_init_packet(&packet);
- end_of_stream = true;
- } else {
- memcpy(&packet, video_packets_.peek(), sizeof(packet));
- }
-
- av_frame_unref(video_buffer_.get());
- av_video_context()->reordered_opaque = packet.pts;
- result = avcodec_decode_video2(av_video_context(), video_buffer_.get(),
- &got_picture, &packet);
- if (!video_packets_.empty()) {
- video_packets_.pop();
- }
-
- EXPECT_GE(result, 0) << "Video decode error.";
- if (result < 0 || (got_picture == 0 && end_of_stream)) {
- return false;
- }
-
- if (got_picture) {
- AVRational doubled_time_base;
- doubled_time_base.den = av_video_stream()->r_frame_rate.num;
- doubled_time_base.num = av_video_stream()->r_frame_rate.den;
- doubled_time_base.den *= 2;
-
- decoded_video_time_ =
- ConvertFromTimeBase(av_video_stream()->time_base,
- video_buffer_->reordered_opaque)
- .InMicroseconds();
- decoded_video_duration_ =
- ConvertFromTimeBase(doubled_time_base,
- 2 + video_buffer_->repeat_pict)
- .InMicroseconds();
- return true;
- }
- }
- }
-
- void DecodeRemainingAudio() {
- while (StepDecodeAudio()) {}
- }
-
- void DecodeRemainingVideo() {
- while (StepDecodeVideo()) {}
- }
-
- void SeekTo(double position) {
- int64 seek_time =
- static_cast<int64>(position * base::Time::kMicrosecondsPerSecond);
- int flags = AVSEEK_FLAG_BACKWARD;
-
- // Passing -1 as our stream index lets FFmpeg pick a default stream.
- // FFmpeg will attempt to use the lowest-index video stream, if present,
- // followed by the lowest-index audio stream.
- EXPECT_GE(0, av_seek_frame(av_format_context_, -1, seek_time, flags))
- << "Failed to seek to position " << position;
- Flush();
- }
-
- bool has_audio() { return audio_stream_index_ >= 0; }
- bool has_video() { return video_stream_index_ >= 0; }
- int64 decoded_audio_time() { return decoded_audio_time_; }
- int64 decoded_audio_duration() { return decoded_audio_duration_; }
- int64 decoded_video_time() { return decoded_video_time_; }
- int64 decoded_video_duration() { return decoded_video_duration_; }
- int64 duration() { return duration_; }
-
- AVStream* av_audio_stream() {
- return av_format_context_->streams[audio_stream_index_];
- }
- AVStream* av_video_stream() {
- return av_format_context_->streams[video_stream_index_];
- }
- AVCodecContext* av_audio_context() {
- return av_audio_stream()->codec;
- }
- AVCodecContext* av_video_context() {
- return av_video_stream()->codec;
- }
-
- private:
- void InitializeFFmpeg() {
- static bool initialized = false;
- if (initialized) {
- return;
- }
-
- base::FilePath path;
- PathService::Get(base::DIR_MODULE, &path);
- EXPECT_TRUE(InitializeMediaLibrary(path))
- << "Could not initialize media library.";
-
- initialized = true;
- }
-
- AVFormatContext* av_format_context_;
- int audio_stream_index_;
- int video_stream_index_;
- AVPacketQueue audio_packets_;
- AVPacketQueue video_packets_;
-
- scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> audio_buffer_;
- scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> video_buffer_;
-
- int64 decoded_audio_time_;
- int64 decoded_audio_duration_;
- int64 decoded_video_time_;
- int64 decoded_video_duration_;
- int64 duration_;
-
- base::MemoryMappedFile file_data_;
- scoped_ptr<InMemoryUrlProtocol> protocol_;
- scoped_ptr<FFmpegGlue> glue_;
-
- DISALLOW_COPY_AND_ASSIGN(FFmpegTest);
-};
-
-#define FFMPEG_TEST_CASE(name, extension) \
- INSTANTIATE_TEST_CASE_P(name##_##extension, FFmpegTest, \
- testing::Values(#name "." #extension));
-
-// Covers all our basic formats.
-FFMPEG_TEST_CASE(sync0, mp4);
-FFMPEG_TEST_CASE(sync0, ogv);
-FFMPEG_TEST_CASE(sync0, webm);
-FFMPEG_TEST_CASE(sync1, m4a);
-FFMPEG_TEST_CASE(sync1, mp3);
-FFMPEG_TEST_CASE(sync1, mp4);
-FFMPEG_TEST_CASE(sync1, ogg);
-FFMPEG_TEST_CASE(sync1, ogv);
-FFMPEG_TEST_CASE(sync1, webm);
-FFMPEG_TEST_CASE(sync2, m4a);
-FFMPEG_TEST_CASE(sync2, mp3);
-FFMPEG_TEST_CASE(sync2, mp4);
-FFMPEG_TEST_CASE(sync2, ogg);
-FFMPEG_TEST_CASE(sync2, ogv);
-FFMPEG_TEST_CASE(sync2, webm);
-
-// Covers our LayoutTest file.
-FFMPEG_TEST_CASE(counting, ogv);
-
-TEST_P(FFmpegTest, Perf) {
- {
- base::PerfTimeLogger timer("Opening file");
- OpenFile(GetParam());
- }
- {
- base::PerfTimeLogger timer("Opening codecs");
- OpenCodecs();
- }
- {
- base::PerfTimeLogger timer("Reading file");
- ReadRemainingFile();
- }
- if (has_audio()) {
- base::PerfTimeLogger timer("Decoding audio");
- DecodeRemainingAudio();
- }
- if (has_video()) {
- base::PerfTimeLogger timer("Decoding video");
- DecodeRemainingVideo();
- }
- {
- base::PerfTimeLogger timer("Seeking to zero");
- SeekTo(0);
- }
-}
-
-TEST_P(FFmpegTest, Loop_Audio) {
- OpenAndReadFile(GetParam());
- if (!has_audio()) {
- return;
- }
-
- const int kSteps = 4;
- std::vector<int64> expected_timestamps_;
- for (int i = 0; i < kSteps; ++i) {
- EXPECT_TRUE(StepDecodeAudio());
- expected_timestamps_.push_back(decoded_audio_time());
- }
-
- SeekTo(0);
- ReadRemainingFile();
-
- for (int i = 0; i < kSteps; ++i) {
- EXPECT_TRUE(StepDecodeAudio());
- EXPECT_EQ(expected_timestamps_[i], decoded_audio_time())
- << "Frame " << i << " had a mismatched timestamp.";
- }
-}
-
-TEST_P(FFmpegTest, Loop_Video) {
- OpenAndReadFile(GetParam());
- if (!has_video()) {
- return;
- }
-
- const int kSteps = 4;
- std::vector<int64> expected_timestamps_;
- for (int i = 0; i < kSteps; ++i) {
- EXPECT_TRUE(StepDecodeVideo());
- expected_timestamps_.push_back(decoded_video_time());
- }
-
- SeekTo(0);
- ReadRemainingFile();
-
- for (int i = 0; i < kSteps; ++i) {
- EXPECT_TRUE(StepDecodeVideo());
- EXPECT_EQ(expected_timestamps_[i], decoded_video_time())
- << "Frame " << i << " had a mismatched timestamp.";
- }
-}
-
-TEST_P(FFmpegTest, Seek_Audio) {
- OpenAndReadFile(GetParam());
- if (!has_audio() && duration() >= 0.5) {
- return;
- }
-
- SeekTo(duration() - 0.5);
- ReadRemainingFile();
-
- EXPECT_TRUE(StepDecodeAudio());
- EXPECT_NE(static_cast<int64>(AV_NOPTS_VALUE), decoded_audio_time());
-}
-
-TEST_P(FFmpegTest, Seek_Video) {
- OpenAndReadFile(GetParam());
- if (!has_video() && duration() >= 0.5) {
- return;
- }
-
- SeekTo(duration() - 0.5);
- ReadRemainingFile();
-
- EXPECT_TRUE(StepDecodeVideo());
- EXPECT_NE(static_cast<int64>(AV_NOPTS_VALUE), decoded_video_time());
-}
-
-TEST_P(FFmpegTest, Decode_Audio) {
- OpenAndReadFile(GetParam());
- if (!has_audio()) {
- return;
- }
-
- int64 last_audio_time = AV_NOPTS_VALUE;
- while (StepDecodeAudio()) {
- ASSERT_GT(decoded_audio_time(), last_audio_time);
- last_audio_time = decoded_audio_time();
- }
-}
-
-TEST_P(FFmpegTest, Decode_Video) {
- OpenAndReadFile(GetParam());
- if (!has_video()) {
- return;
- }
-
- int64 last_video_time = AV_NOPTS_VALUE;
- while (StepDecodeVideo()) {
- ASSERT_GT(decoded_video_time(), last_video_time);
- last_video_time = decoded_video_time();
- }
-}
-
-TEST_P(FFmpegTest, Duration) {
- OpenAndReadFile(GetParam());
-
- if (has_audio()) {
- DecodeRemainingAudio();
- }
-
- if (has_video()) {
- DecodeRemainingVideo();
- }
-
- double expected = static_cast<double>(duration());
- double actual = static_cast<double>(
- std::max(decoded_audio_time() + decoded_audio_duration(),
- decoded_video_time() + decoded_video_duration()));
- EXPECT_NEAR(expected, actual, 500000)
- << "Duration is off by more than 0.5 seconds.";
-}
-
-TEST_F(FFmpegTest, VideoPlayedCollapse) {
- OpenFile("test.ogv");
- OpenCodecs();
-
- SeekTo(0.5);
- ReadRemainingFile();
- EXPECT_TRUE(StepDecodeVideo());
- VLOG(1) << decoded_video_time();
-
- SeekTo(2.83);
- ReadRemainingFile();
- EXPECT_TRUE(StepDecodeVideo());
- VLOG(1) << decoded_video_time();
-
- SeekTo(0.4);
- ReadRemainingFile();
- EXPECT_TRUE(StepDecodeVideo());
- VLOG(1) << decoded_video_time();
-}
-
-} // namespace media
diff --git a/chromium/media/filters/audio_clock.cc b/chromium/media/filters/audio_clock.cc
index 117d6038205..aebc5e55bef 100644
--- a/chromium/media/filters/audio_clock.cc
+++ b/chromium/media/filters/audio_clock.cc
@@ -28,7 +28,7 @@ AudioClock::~AudioClock() {
void AudioClock::WroteAudio(int frames_written,
int frames_requested,
int delay_frames,
- float playback_rate) {
+ double playback_rate) {
DCHECK_GE(frames_written, 0);
DCHECK_LE(frames_written, frames_requested);
DCHECK_GE(delay_frames, 0);
@@ -80,18 +80,9 @@ void AudioClock::WroteAudio(int frames_written,
microseconds_per_frame_);
}
-base::TimeDelta AudioClock::TimestampSinceWriting(
- base::TimeDelta time_since_writing) const {
- int64_t frames_played_since_writing = std::min(
- total_buffered_frames_,
- static_cast<int64_t>(time_since_writing.InSecondsF() * sample_rate_));
- return front_timestamp_ +
- ComputeBufferedMediaTime(frames_played_since_writing);
-}
-
base::TimeDelta AudioClock::TimeUntilPlayback(base::TimeDelta timestamp) const {
- DCHECK(timestamp >= front_timestamp_);
- DCHECK(timestamp <= back_timestamp_);
+ DCHECK_GE(timestamp, front_timestamp_);
+ DCHECK_LE(timestamp, back_timestamp_);
int64_t frames_until_timestamp = 0;
double timestamp_us = timestamp.InMicroseconds();
@@ -126,11 +117,11 @@ base::TimeDelta AudioClock::TimeUntilPlayback(base::TimeDelta timestamp) const {
microseconds_per_frame_);
}
-AudioClock::AudioData::AudioData(int64_t frames, float playback_rate)
+AudioClock::AudioData::AudioData(int64_t frames, double playback_rate)
: frames(frames), playback_rate(playback_rate) {
}
-void AudioClock::PushBufferedAudioData(int64_t frames, float playback_rate) {
+void AudioClock::PushBufferedAudioData(int64_t frames, double playback_rate) {
if (frames == 0)
return;
diff --git a/chromium/media/filters/audio_clock.h b/chromium/media/filters/audio_clock.h
index 6472f11319e..fe462aba5d5 100644
--- a/chromium/media/filters/audio_clock.h
+++ b/chromium/media/filters/audio_clock.h
@@ -57,7 +57,7 @@ class MEDIA_EXPORT AudioClock {
void WroteAudio(int frames_written,
int frames_requested,
int delay_frames,
- float playback_rate);
+ double playback_rate);
// Returns the bounds of media data currently buffered by the audio hardware,
// taking silence and changes in playback rate into account. Buffered audio
@@ -77,13 +77,6 @@ class MEDIA_EXPORT AudioClock {
base::TimeDelta front_timestamp() const { return front_timestamp_; }
base::TimeDelta back_timestamp() const { return back_timestamp_; }
- // Clients can provide |time_since_writing| to simulate the passage of time
- // since last writing audio to get a more accurate current media timestamp.
- //
- // The value will be bounded between front_timestamp() and back_timestamp().
- base::TimeDelta TimestampSinceWriting(
- base::TimeDelta time_since_writing) const;
-
// Returns the amount of wall time until |timestamp| will be played by the
// audio hardware.
//
@@ -109,14 +102,14 @@ class MEDIA_EXPORT AudioClock {
//
// 32 bits on the other hand would top out at measly 2 hours and 20 minutes.
struct AudioData {
- AudioData(int64_t frames, float playback_rate);
+ AudioData(int64_t frames, double playback_rate);
int64_t frames;
- float playback_rate;
+ double playback_rate;
};
// Helpers for operating on |buffered_|.
- void PushBufferedAudioData(int64_t frames, float playback_rate);
+ void PushBufferedAudioData(int64_t frames, double playback_rate);
void PopBufferedAudioData(int64_t frames);
base::TimeDelta ComputeBufferedMediaTime(int64_t frames) const;
diff --git a/chromium/media/filters/audio_clock_unittest.cc b/chromium/media/filters/audio_clock_unittest.cc
index 557fa7c9686..3fe437ee24c 100644
--- a/chromium/media/filters/audio_clock_unittest.cc
+++ b/chromium/media/filters/audio_clock_unittest.cc
@@ -19,7 +19,7 @@ class AudioClockTest : public testing::Test {
void WroteAudio(int frames_written,
int frames_requested,
int delay_frames,
- float playback_rate) {
+ double playback_rate) {
clock_.WroteAudio(
frames_written, frames_requested, delay_frames, playback_rate);
}
@@ -34,11 +34,6 @@ class AudioClockTest : public testing::Test {
return clock_.back_timestamp().InMilliseconds();
}
- int TimestampSinceLastWritingInMilliseconds(int milliseconds) {
- return clock_.TimestampSinceWriting(base::TimeDelta::FromMilliseconds(
- milliseconds)).InMilliseconds();
- }
-
int TimeUntilPlaybackInMilliseconds(int timestamp_ms) {
return clock_.TimeUntilPlayback(base::TimeDelta::FromMilliseconds(
timestamp_ms)).InMilliseconds();
@@ -78,14 +73,6 @@ TEST_F(AudioClockTest, BackTimestampStartsAtStartTimestamp) {
EXPECT_EQ(expected, clock.back_timestamp());
}
-TEST_F(AudioClockTest, TimestampSinceWritingStartsAtStartTimestamp) {
- base::TimeDelta expected = base::TimeDelta::FromSeconds(123);
- AudioClock clock(expected, sample_rate_);
-
- base::TimeDelta time_since_writing = base::TimeDelta::FromSeconds(456);
- EXPECT_EQ(expected, clock.TimestampSinceWriting(time_since_writing));
-}
-
TEST_F(AudioClockTest, ContiguousAudioDataBufferedStartsAtZero) {
EXPECT_EQ(base::TimeDelta(), clock_.contiguous_audio_data_buffered());
}
@@ -280,47 +267,6 @@ TEST_F(AudioClockTest, ZeroDelay) {
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
}
-TEST_F(AudioClockTest, TimestampSinceLastWriting) {
- // Construct an audio clock with the following representation:
- //
- // |- existing delay -|------------ calls to WroteAudio() -----------------|
- // +-------------------+----------------+------------------+----------------+
- // | 20 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x |
- // +-------------------+----------------+------------------+----------------+
- // Media timestamp: 0 1000 1500 3500
- // Wall clock time: 2000 3000 4000 5000
- WroteAudio(10, 10, 40, 1.0);
- WroteAudio(10, 10, 40, 0.5);
- WroteAudio(10, 10, 40, 2.0);
- EXPECT_EQ(0, FrontTimestampInMilliseconds());
- EXPECT_EQ(3500, BackTimestampInMilliseconds());
- EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
-
- // Simulate passing 2000ms of initial delay in the audio hardware.
- EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(0));
- EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(500));
- EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(1000));
- EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(1500));
- EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(2000));
-
- // Now we should see the 1.0x buffer.
- EXPECT_EQ(500, TimestampSinceLastWritingInMilliseconds(2500));
- EXPECT_EQ(1000, TimestampSinceLastWritingInMilliseconds(3000));
-
- // Now we should see the 0.5x buffer.
- EXPECT_EQ(1250, TimestampSinceLastWritingInMilliseconds(3500));
- EXPECT_EQ(1500, TimestampSinceLastWritingInMilliseconds(4000));
-
- // Now we should see the 2.0x buffer.
- EXPECT_EQ(2500, TimestampSinceLastWritingInMilliseconds(4500));
- EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(5000));
-
- // Times beyond the known length of the audio clock should return the last
- // media timestamp we know of.
- EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(5001));
- EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(6000));
-}
-
TEST_F(AudioClockTest, TimeUntilPlayback) {
// Construct an audio clock with the following representation:
//
diff --git a/chromium/media/filters/audio_decoder_selector_unittest.cc b/chromium/media/filters/audio_decoder_selector_unittest.cc
index 3b55925a803..2bb1f6d263e 100644
--- a/chromium/media/filters/audio_decoder_selector_unittest.cc
+++ b/chromium/media/filters/audio_decoder_selector_unittest.cc
@@ -92,13 +92,6 @@ class AudioDecoderSelectorTest : public ::testing::Test {
void InitializeDecoderSelector(DecryptorCapability decryptor_capability,
int num_decoders) {
- SetDecryptorReadyCB set_decryptor_ready_cb;
- if (decryptor_capability != kNoDecryptor) {
- set_decryptor_ready_cb =
- base::Bind(&AudioDecoderSelectorTest::SetDecryptorReadyCallback,
- base::Unretained(this));
- }
-
if (decryptor_capability == kDecryptOnly ||
decryptor_capability == kDecryptAndDecode) {
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
@@ -121,6 +114,10 @@ class AudioDecoderSelectorTest : public ::testing::Test {
// Set and cancel DecryptorReadyCB but the callback is never fired.
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
.Times(2);
+ } else if (decryptor_capability == kNoDecryptor) {
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .WillRepeatedly(
+ RunCallback<0>(nullptr, base::Bind(&IgnoreCdmAttached)));
}
DCHECK_GE(all_decoders_.size(), static_cast<size_t>(num_decoders));
@@ -128,18 +125,18 @@ class AudioDecoderSelectorTest : public ::testing::Test {
all_decoders_.begin() + num_decoders, all_decoders_.end());
decoder_selector_.reset(new AudioDecoderSelector(
- message_loop_.message_loop_proxy(),
- all_decoders_.Pass(),
- set_decryptor_ready_cb));
+ message_loop_.message_loop_proxy(), all_decoders_.Pass()));
}
void SelectDecoder() {
decoder_selector_->SelectDecoder(
demuxer_stream_.get(),
- false,
+ base::Bind(&AudioDecoderSelectorTest::SetDecryptorReadyCallback,
+ base::Unretained(this)),
base::Bind(&AudioDecoderSelectorTest::MockOnDecoderSelected,
base::Unretained(this)),
- base::Bind(&AudioDecoderSelectorTest::OnDecoderOutput));
+ base::Bind(&AudioDecoderSelectorTest::OnDecoderOutput),
+ base::Bind(&AudioDecoderSelectorTest::OnWaitingForDecryptionKey));
message_loop_.RunUntilIdle();
}
@@ -155,6 +152,10 @@ class AudioDecoderSelectorTest : public ::testing::Test {
NOTREACHED();
}
+ static void OnWaitingForDecryptionKey() {
+ NOTREACHED();
+ }
+
// Declare |decoder_selector_| after |demuxer_stream_| and |decryptor_| since
// |demuxer_stream_| and |decryptor_| should outlive |decoder_selector_|.
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
diff --git a/chromium/media/filters/audio_decoder_unittest.cc b/chromium/media/filters/audio_decoder_unittest.cc
index 1835d2985b0..42b00ee13b8 100644
--- a/chromium/media/filters/audio_decoder_unittest.cc
+++ b/chromium/media/filters/audio_decoder_unittest.cc
@@ -186,6 +186,8 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
reader_->GetAVStreamForTesting()->time_base, packet.pts));
buffer->set_duration(ConvertFromTimeBase(
reader_->GetAVStreamForTesting()->time_base, packet.duration));
+ if (packet.flags & AV_PKT_FLAG_KEY)
+ buffer->set_is_key_frame(true);
// Don't set discard padding for Opus, it already has discard behavior set
// based on the codec delay in the AudioDecoderConfig.
diff --git a/chromium/media/filters/audio_file_reader.cc b/chromium/media/filters/audio_file_reader.cc
index 3b14355cd41..70b60d757ef 100644
--- a/chromium/media/filters/audio_file_reader.cc
+++ b/chromium/media/filters/audio_file_reader.cc
@@ -10,7 +10,6 @@
#include "base/time/time.h"
#include "media/base/audio_bus.h"
#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/filters/ffmpeg_glue.h"
namespace media {
@@ -20,7 +19,6 @@ AudioFileReader::AudioFileReader(FFmpegURLProtocol* protocol)
protocol_(protocol),
channels_(0),
sample_rate_(0),
- end_padding_(0),
av_sample_format_(0) {
}
@@ -66,13 +64,6 @@ bool AudioFileReader::OpenDemuxer() {
return false;
}
- // Attempt to extract end padding for mp3 files.
- if (strcmp(format_context->iformat->name, "mp3") == 0 &&
- (av_opt_get_int(format_context->priv_data, "end_pad", 0, &end_padding_) <
- 0 ||
- end_padding_ < 0)) {
- end_padding_ = 0;
- }
return true;
}
@@ -233,12 +224,6 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
av_free_packet(&packet);
}
- // If decoding completed successfully try to strip end padding.
- if (continue_decoding && end_padding_ <= current_frame) {
- DCHECK_GE(end_padding_, 0);
- current_frame -= end_padding_;
- }
-
// Zero any remaining frames.
audio_bus->ZeroFramesPartial(
current_frame, audio_bus->frames() - current_frame);
diff --git a/chromium/media/filters/audio_file_reader.h b/chromium/media/filters/audio_file_reader.h
index 963baa7346c..c700b3288fc 100644
--- a/chromium/media/filters/audio_file_reader.h
+++ b/chromium/media/filters/audio_file_reader.h
@@ -87,7 +87,6 @@ class MEDIA_EXPORT AudioFileReader {
FFmpegURLProtocol* protocol_;
int channels_;
int sample_rate_;
- int64_t end_padding_;
// AVSampleFormat initially requested; not Chrome's SampleFormat.
int av_sample_format_;
diff --git a/chromium/media/filters/audio_file_reader_unittest.cc b/chromium/media/filters/audio_file_reader_unittest.cc
index 6b9bf9d9cb1..d83a5d3158f 100644
--- a/chromium/media/filters/audio_file_reader_unittest.cc
+++ b/chromium/media/filters/audio_file_reader_unittest.cc
@@ -195,12 +195,12 @@ TEST_F(AudioFileReaderTest, WaveF32LE) {
#if defined(USE_PROPRIETARY_CODECS)
TEST_F(AudioFileReaderTest, MP3) {
RunTest("sfx.mp3",
- "5.59,7.11,6.63,6.23,5.58,5.22,",
+ "1.30,2.72,4.56,5.08,3.74,2.03,",
1,
44100,
base::TimeDelta::FromMicroseconds(313470),
13825,
- 10496);
+ 11025);
}
TEST_F(AudioFileReaderTest, CorruptMP3) {
diff --git a/chromium/media/filters/audio_renderer_algorithm.cc b/chromium/media/filters/audio_renderer_algorithm.cc
index b604b9ee273..2d2bfbfe71f 100644
--- a/chromium/media/filters/audio_renderer_algorithm.cc
+++ b/chromium/media/filters/audio_renderer_algorithm.cc
@@ -8,8 +8,6 @@
#include <cmath>
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/limits.h"
#include "media/filters/wsola_internals.h"
@@ -49,8 +47,8 @@ namespace media {
// Max/min supported playback rates for fast/slow audio. Audio outside of these
// ranges are muted.
// Audio at these speeds would sound better under a frequency domain algorithm.
-static const float kMinPlaybackRate = 0.5f;
-static const float kMaxPlaybackRate = 4.0f;
+static const double kMinPlaybackRate = 0.5;
+static const double kMaxPlaybackRate = 4.0;
// Overlap-and-add window size in milliseconds.
static const int kOlaWindowSizeMs = 20;
@@ -68,9 +66,9 @@ static const int kMaxCapacityInSeconds = 3;
// maintain this number of frames.
static const int kStartingBufferSizeInFrames = 16 * 512;
-COMPILE_ASSERT(kStartingBufferSizeInFrames <
- (kMaxCapacityInSeconds * limits::kMinSampleRate),
- max_capacity_smaller_than_starting_buffer_size);
+static_assert(kStartingBufferSizeInFrames <
+ (kMaxCapacityInSeconds * limits::kMinSampleRate),
+ "max capacity smaller than starting buffer size");
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
@@ -142,8 +140,9 @@ void AudioRendererAlgorithm::Initialize(const AudioParameters& params) {
}
int AudioRendererAlgorithm::FillBuffer(AudioBus* dest,
+ int dest_offset,
int requested_frames,
- float playback_rate) {
+ double playback_rate) {
if (playback_rate == 0)
return 0;
@@ -162,7 +161,7 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest,
// time.
muted_partial_frame_ += frames_to_render * playback_rate;
int seek_frames = static_cast<int>(muted_partial_frame_);
- dest->ZeroFrames(frames_to_render);
+ dest->ZeroFramesPartial(dest_offset, frames_to_render);
audio_buffer_.SeekFrames(seek_frames);
// Determine the partial frame that remains to be skipped for next call. If
@@ -182,15 +181,17 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest,
if (ola_window_size_ <= faster_step && slower_step >= ola_window_size_) {
const int frames_to_copy =
std::min(audio_buffer_.frames(), requested_frames);
- const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest);
+ const int frames_read =
+ audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
DCHECK_EQ(frames_read, frames_to_copy);
return frames_read;
}
int rendered_frames = 0;
do {
- rendered_frames += WriteCompletedFramesTo(
- requested_frames - rendered_frames, rendered_frames, dest);
+ rendered_frames +=
+ WriteCompletedFramesTo(requested_frames - rendered_frames,
+ dest_offset + rendered_frames, dest);
} while (rendered_frames < requested_frames &&
RunOneWsolaIteration(playback_rate));
return rendered_frames;
@@ -234,7 +235,7 @@ bool AudioRendererAlgorithm::CanPerformWsola() const {
search_block_index_ + search_block_size <= frames;
}
-bool AudioRendererAlgorithm::RunOneWsolaIteration(float playback_rate) {
+bool AudioRendererAlgorithm::RunOneWsolaIteration(double playback_rate) {
if (!CanPerformWsola())
return false;
@@ -260,7 +261,7 @@ bool AudioRendererAlgorithm::RunOneWsolaIteration(float playback_rate) {
return true;
}
-void AudioRendererAlgorithm::UpdateOutputTime(float playback_rate,
+void AudioRendererAlgorithm::UpdateOutputTime(double playback_rate,
double time_change) {
output_time_ += time_change;
// Center of the search region, in frames.
@@ -269,7 +270,7 @@ void AudioRendererAlgorithm::UpdateOutputTime(float playback_rate,
search_block_index_ = search_block_center_index - search_block_center_offset_;
}
-void AudioRendererAlgorithm::RemoveOldInputFrames(float playback_rate) {
+void AudioRendererAlgorithm::RemoveOldInputFrames(double playback_rate) {
const int earliest_used_index = std::min(target_block_index_,
search_block_index_);
if (earliest_used_index <= 0)
diff --git a/chromium/media/filters/audio_renderer_algorithm.h b/chromium/media/filters/audio_renderer_algorithm.h
index 68b18a54bb5..2005bfeb950 100644
--- a/chromium/media/filters/audio_renderer_algorithm.h
+++ b/chromium/media/filters/audio_renderer_algorithm.h
@@ -45,8 +45,13 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
//
// Data from |audio_buffer_| is consumed in proportion to the playback rate.
//
+ // |dest_offset| is the offset in frames for writing into |dest|.
+ //
// Returns the number of frames copied into |dest|.
- int FillBuffer(AudioBus* dest, int requested_frames, float playback_rate);
+ int FillBuffer(AudioBus* dest,
+ int dest_offset,
+ int requested_frames,
+ double playback_rate);
// Clears |audio_buffer_|.
void FlushBuffers();
@@ -96,15 +101,15 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Run one iteration of WSOLA, if there are sufficient frames. This will
// overlap-and-add one block to |wsola_output_|, hence, |num_complete_frames_|
// is incremented by |ola_hop_size_|.
- bool RunOneWsolaIteration(float playback_rate);
+ bool RunOneWsolaIteration(double playback_rate);
// Seek |audio_buffer_| forward to remove frames from input that are not used
// any more. State of the WSOLA will be updated accordingly.
- void RemoveOldInputFrames(float playback_rate);
+ void RemoveOldInputFrames(double playback_rate);
// Update |output_time_| by |time_change|. In turn |search_block_index_| is
// updated.
- void UpdateOutputTime(float playback_rate, double time_change);
+ void UpdateOutputTime(double playback_rate, double time_change);
// Is |target_block_| fully within |search_block_|? If so, we don't need to
// perform the search.
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index 37584ea12f5..003cd512da4 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -145,16 +145,20 @@ class AudioRendererAlgorithmTest : public testing::Test {
}
}
- bool AudioDataIsMuted(AudioBus* audio_data, int frames_written) {
- for (int ch = 0; ch < channels_; ++ch) {
- for (int i = 0; i < frames_written; ++i) {
- if (audio_data->channel(ch)[i] != 0.0f)
+ bool VerifyAudioData(AudioBus* bus, int offset, int frames, float value) {
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ for (int i = offset; i < offset + frames; ++i) {
+ if (bus->channel(ch)[i] != value)
return false;
}
}
return true;
}
+ bool AudioDataIsMuted(AudioBus* audio_data, int frames_written) {
+ return VerifyAudioData(audio_data, 0, frames_written, 0);
+ }
+
int ComputeConsumedFrames(int initial_frames_enqueued,
int initial_frames_buffered) {
int frame_delta = frames_enqueued_ - initial_frames_enqueued;
@@ -183,7 +187,7 @@ class AudioRendererAlgorithmTest : public testing::Test {
AudioBus::Create(channels_, buffer_size_in_frames);
if (playback_rate == 0.0) {
int frames_written = algorithm_.FillBuffer(
- bus.get(), buffer_size_in_frames, playback_rate);
+ bus.get(), 0, buffer_size_in_frames, playback_rate);
EXPECT_EQ(0, frames_written);
return;
}
@@ -195,7 +199,7 @@ class AudioRendererAlgorithmTest : public testing::Test {
while (frames_remaining > 0) {
int frames_requested = std::min(buffer_size_in_frames, frames_remaining);
int frames_written =
- algorithm_.FillBuffer(bus.get(), frames_requested, playback_rate);
+ algorithm_.FillBuffer(bus.get(), 0, frames_requested, playback_rate);
ASSERT_GT(frames_written, 0) << "Requested: " << frames_requested
<< ", playing at " << playback_rate;
@@ -235,7 +239,7 @@ class AudioRendererAlgorithmTest : public testing::Test {
EXPECT_NEAR(playback_rate, actual_playback_rate, playback_rate / 100.0);
}
- void WsolaTest(float playback_rate) {
+ void WsolaTest(double playback_rate) {
const int kSampleRateHz = 48000;
const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
const int kBytesPerSample = 2;
@@ -281,7 +285,8 @@ class AudioRendererAlgorithmTest : public testing::Test {
for (int n = 0; n < kNumRequestedPulses; ++n) {
int num_buffered_frames = 0;
while (num_buffered_frames < kPulseWidthSamples) {
- int num_samples = algorithm_.FillBuffer(output.get(), 1, playback_rate);
+ int num_samples =
+ algorithm_.FillBuffer(output.get(), 0, 1, playback_rate);
ASSERT_LE(num_samples, 1);
if (num_samples > 0) {
output->CopyPartialFramesTo(0, num_samples, num_buffered_frames,
@@ -635,11 +640,46 @@ TEST_F(AudioRendererAlgorithmTest, QuadraticInterpolation_Colinear) {
}
TEST_F(AudioRendererAlgorithmTest, WsolaSlowdown) {
- WsolaTest(0.6f);
+ WsolaTest(0.6);
}
TEST_F(AudioRendererAlgorithmTest, WsolaSpeedup) {
- WsolaTest(1.6f);
+ WsolaTest(1.6);
+}
+
+TEST_F(AudioRendererAlgorithmTest, FillBufferOffset) {
+ Initialize();
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels_, kFrameSize);
+
+ // Verify that the first half of |bus| remains zero and the last half is
+ // filled appropriately at normal, above normal, below normal, and muted
+ // rates.
+ const int kHalfSize = kFrameSize / 2;
+ const float kAudibleRates[] = {1.0f, 2.0f, 0.5f};
+ for (size_t i = 0; i < arraysize(kAudibleRates); ++i) {
+ SCOPED_TRACE(kAudibleRates[i]);
+ bus->Zero();
+
+ const int frames_filled = algorithm_.FillBuffer(
+ bus.get(), kHalfSize, kHalfSize, kAudibleRates[i]);
+ ASSERT_EQ(kHalfSize, frames_filled);
+ ASSERT_TRUE(VerifyAudioData(bus.get(), 0, kHalfSize, 0));
+ ASSERT_FALSE(VerifyAudioData(bus.get(), kHalfSize, kHalfSize, 0));
+ }
+
+ const float kMutedRates[] = {5.0f, 0.25f};
+ for (size_t i = 0; i < arraysize(kMutedRates); ++i) {
+ SCOPED_TRACE(kMutedRates[i]);
+ for (int ch = 0; ch < bus->channels(); ++ch)
+ std::fill(bus->channel(ch), bus->channel(ch) + bus->frames(), 1.0f);
+
+ const int frames_filled =
+ algorithm_.FillBuffer(bus.get(), kHalfSize, kHalfSize, kMutedRates[i]);
+ ASSERT_EQ(kHalfSize, frames_filled);
+ ASSERT_FALSE(VerifyAudioData(bus.get(), 0, kHalfSize, 0));
+ ASSERT_TRUE(VerifyAudioData(bus.get(), kHalfSize, kHalfSize, 0));
+ }
}
} // namespace media
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index 55ff88c2dac..d641ded72c0 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -98,14 +98,16 @@ class SourceState {
SourceState(
scoped_ptr<StreamParser> stream_parser,
scoped_ptr<FrameProcessor> frame_processor, const LogCB& log_cb,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb);
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb,
+ const scoped_refptr<MediaLog>& media_log);
~SourceState();
void Init(const StreamParser::InitCB& init_cb,
bool allow_audio,
bool allow_video,
- const StreamParser::NeedKeyCB& need_key_cb,
+ const StreamParser::EncryptedMediaInitDataCB&
+ encrypted_media_init_data_cb,
const NewTextTrackCB& new_text_track_cb);
// Appends new data to the StreamParser.
@@ -193,8 +195,7 @@ class SourceState {
const StreamParser::BufferQueue& video_buffers,
const StreamParser::TextBufferQueueMap& text_map);
- void OnSourceInitDone(bool success,
- const StreamParser::InitParameters& params);
+ void OnSourceInitDone(const StreamParser::InitParameters& params);
CreateDemuxerStreamCB create_demuxer_stream_cb_;
NewTextTrackCB new_text_track_cb_;
@@ -235,6 +236,7 @@ class SourceState {
scoped_ptr<FrameProcessor> frame_processor_;
LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
StreamParser::InitCB init_cb_;
// During Append(), OnNewConfigs() will trigger the initialization segment
@@ -256,7 +258,8 @@ class SourceState {
SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
scoped_ptr<FrameProcessor> frame_processor,
const LogCB& log_cb,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb)
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb,
+ const scoped_refptr<MediaLog>& media_log)
: create_demuxer_stream_cb_(create_demuxer_stream_cb),
timestamp_offset_during_append_(NULL),
new_media_segment_(false),
@@ -266,6 +269,7 @@ SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
video_(NULL),
frame_processor_(frame_processor.release()),
log_cb_(log_cb),
+ media_log_(media_log),
auto_update_timestamp_offset_(false) {
DCHECK(!create_demuxer_stream_cb_.is_null());
DCHECK(frame_processor_);
@@ -277,23 +281,21 @@ SourceState::~SourceState() {
STLDeleteValues(&text_stream_map_);
}
-void SourceState::Init(const StreamParser::InitCB& init_cb,
- bool allow_audio,
- bool allow_video,
- const StreamParser::NeedKeyCB& need_key_cb,
- const NewTextTrackCB& new_text_track_cb) {
+void SourceState::Init(
+ const StreamParser::InitCB& init_cb,
+ bool allow_audio,
+ bool allow_video,
+ const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewTextTrackCB& new_text_track_cb) {
new_text_track_cb_ = new_text_track_cb;
init_cb_ = init_cb;
stream_parser_->Init(
base::Bind(&SourceState::OnSourceInitDone, base::Unretained(this)),
- base::Bind(&SourceState::OnNewConfigs,
- base::Unretained(this),
- allow_audio,
- allow_video),
+ base::Bind(&SourceState::OnNewConfigs, base::Unretained(this),
+ allow_audio, allow_video),
base::Bind(&SourceState::OnNewBuffers, base::Unretained(this)),
- new_text_track_cb_.is_null(),
- need_key_cb,
+ new_text_track_cb_.is_null(), encrypted_media_init_data_cb,
base::Bind(&SourceState::OnNewMediaSegment, base::Unretained(this)),
base::Bind(&SourceState::OnEndOfMediaSegment, base::Unretained(this)),
log_cb_);
@@ -332,7 +334,7 @@ bool SourceState::Append(
// append window and timestamp offset pointer. See http://crbug.com/351454.
bool result = stream_parser_->Parse(data, length);
if (!result) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< __FUNCTION__ << ": stream parsing failed."
<< " Data size=" << length
<< " append_window_start=" << append_window_start.InSecondsF()
@@ -570,7 +572,7 @@ bool SourceState::OnNewConfigs(
// Signal an error if we get configuration info for stream types that weren't
// specified in AddId() or more configs after a stream is initialized.
if (allow_audio != audio_config.IsValidConfig()) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< "Initialization segment"
<< (audio_config.IsValidConfig() ? " has" : " does not have")
<< " an audio track, but the mimetype"
@@ -580,7 +582,7 @@ bool SourceState::OnNewConfigs(
}
if (allow_video != video_config.IsValidConfig()) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< "Initialization segment"
<< (video_config.IsValidConfig() ? " has" : " does not have")
<< " a video track, but the mimetype"
@@ -592,6 +594,15 @@ bool SourceState::OnNewConfigs(
bool success = true;
if (audio_config.IsValidConfig()) {
if (!audio_) {
+ media_log_->SetBooleanProperty("found_audio_stream", true);
+ }
+ if (!audio_ ||
+ audio_->audio_decoder_config().codec() != audio_config.codec()) {
+ media_log_->SetStringProperty("audio_codec_name",
+ audio_config.GetHumanReadableCodecName());
+ }
+
+ if (!audio_) {
audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO);
if (!audio_) {
@@ -611,6 +622,15 @@ bool SourceState::OnNewConfigs(
if (video_config.IsValidConfig()) {
if (!video_) {
+ media_log_->SetBooleanProperty("found_video_stream", true);
+ }
+ if (!video_ ||
+ video_->video_decoder_config().codec() != video_config.codec()) {
+ media_log_->SetStringProperty("video_codec_name",
+ video_config.GetHumanReadableCodecName());
+ }
+
+ if (!video_) {
video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO);
if (!video_) {
@@ -635,8 +655,8 @@ bool SourceState::OnNewConfigs(
create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
if (!frame_processor_->AddTrack(itr->first, text_stream)) {
success &= false;
- MEDIA_LOG(log_cb_) << "Failed to add text track ID " << itr->first
- << " to frame processor.";
+ MEDIA_LOG(ERROR, log_cb_) << "Failed to add text track ID "
+ << itr->first << " to frame processor.";
break;
}
text_stream->UpdateTextConfig(itr->second, log_cb_);
@@ -647,7 +667,7 @@ bool SourceState::OnNewConfigs(
const size_t text_count = text_stream_map_.size();
if (text_configs.size() != text_count) {
success &= false;
- MEDIA_LOG(log_cb_) << "The number of text track configs changed.";
+ MEDIA_LOG(ERROR, log_cb_) << "The number of text track configs changed.";
} else if (text_count == 1) {
TextConfigItr config_itr = text_configs.begin();
TextStreamMap::iterator stream_itr = text_stream_map_.begin();
@@ -659,7 +679,8 @@ bool SourceState::OnNewConfigs(
old_config.id());
if (!new_config.Matches(old_config)) {
success &= false;
- MEDIA_LOG(log_cb_) << "New text track config does not match old one.";
+ MEDIA_LOG(ERROR, log_cb_)
+ << "New text track config does not match old one.";
} else {
StreamParser::TrackId old_id = stream_itr->first;
StreamParser::TrackId new_id = config_itr->first;
@@ -669,7 +690,8 @@ bool SourceState::OnNewConfigs(
text_stream_map_[config_itr->first] = text_stream;
} else {
success &= false;
- MEDIA_LOG(log_cb_) << "Error remapping single text track number";
+ MEDIA_LOG(ERROR, log_cb_)
+ << "Error remapping single text track number";
}
}
}
@@ -680,9 +702,9 @@ bool SourceState::OnNewConfigs(
text_stream_map_.find(config_itr->first);
if (stream_itr == text_stream_map_.end()) {
success &= false;
- MEDIA_LOG(log_cb_) << "Unexpected text track configuration "
- "for track ID "
- << config_itr->first;
+ MEDIA_LOG(ERROR, log_cb_)
+ << "Unexpected text track configuration for track ID "
+ << config_itr->first;
break;
}
@@ -691,9 +713,9 @@ bool SourceState::OnNewConfigs(
TextTrackConfig old_config = stream->text_track_config();
if (!new_config.Matches(old_config)) {
success &= false;
- MEDIA_LOG(log_cb_) << "New text track config for track ID "
- << config_itr->first
- << " does not match old one.";
+ MEDIA_LOG(ERROR, log_cb_) << "New text track config for track ID "
+ << config_itr->first
+ << " does not match old one.";
break;
}
}
@@ -767,14 +789,16 @@ bool SourceState::OnNewBuffers(
return true;
}
-void SourceState::OnSourceInitDone(bool success,
- const StreamParser::InitParameters& params) {
+void SourceState::OnSourceInitDone(const StreamParser::InitParameters& params) {
auto_update_timestamp_offset_ = params.auto_update_timestamp_offset;
- base::ResetAndReturn(&init_cb_).Run(success, params);
+ base::ResetAndReturn(&init_cb_).Run(params);
}
-ChunkDemuxerStream::ChunkDemuxerStream(Type type, bool splice_frames_enabled)
+ChunkDemuxerStream::ChunkDemuxerStream(Type type,
+ Liveness liveness,
+ bool splice_frames_enabled)
: type_(type),
+ liveness_(liveness),
state_(UNINITIALIZED),
splice_frames_enabled_(splice_frames_enabled),
partial_append_window_trimming_enabled_(false) {
@@ -972,7 +996,12 @@ void ChunkDemuxerStream::Read(const ReadCB& read_cb) {
CompletePendingReadIfPossible_Locked();
}
-DemuxerStream::Type ChunkDemuxerStream::type() { return type_; }
+DemuxerStream::Type ChunkDemuxerStream::type() const { return type_; }
+
+DemuxerStream::Liveness ChunkDemuxerStream::liveness() const {
+ base::AutoLock auto_lock(lock_);
+ return liveness_;
+}
AudioDecoderConfig ChunkDemuxerStream::audio_decoder_config() {
CHECK_EQ(type_, AUDIO);
@@ -988,14 +1017,19 @@ VideoDecoderConfig ChunkDemuxerStream::video_decoder_config() {
bool ChunkDemuxerStream::SupportsConfigChanges() { return true; }
+VideoRotation ChunkDemuxerStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
TextTrackConfig ChunkDemuxerStream::text_track_config() {
CHECK_EQ(type_, TEXT);
base::AutoLock auto_lock(lock_);
return stream_->GetCurrentTextTrackConfig();
}
-VideoRotation ChunkDemuxerStream::video_rotation() {
- return VIDEO_ROTATION_0;
+void ChunkDemuxerStream::SetLiveness(Liveness liveness) {
+ base::AutoLock auto_lock(lock_);
+ liveness_ = liveness;
}
void ChunkDemuxerStream::ChangeState_Locked(State state) {
@@ -1023,19 +1057,29 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
switch (stream_->GetNextBuffer(&buffer)) {
case SourceBufferStream::kSuccess:
status = DemuxerStream::kOk;
+ DVLOG(2) << __FUNCTION__ << ": returning kOk, type " << type_
+ << ", dts " << buffer->GetDecodeTimestamp().InSecondsF()
+ << ", pts " << buffer->timestamp().InSecondsF()
+ << ", dur " << buffer->duration().InSecondsF()
+ << ", key " << buffer->is_key_frame();
break;
case SourceBufferStream::kNeedBuffer:
// Return early without calling |read_cb_| since we don't have
// any data to return yet.
+ DVLOG(2) << __FUNCTION__ << ": returning kNeedBuffer, type "
+ << type_;
return;
case SourceBufferStream::kEndOfStream:
status = DemuxerStream::kOk;
buffer = StreamParserBuffer::CreateEOSBuffer();
+ DVLOG(2) << __FUNCTION__ << ": returning kOk with EOS buffer, type "
+ << type_;
break;
case SourceBufferStream::kConfigChange:
- DVLOG(2) << "Config change reported to ChunkDemuxerStream.";
status = kConfigChanged;
buffer = NULL;
+ DVLOG(2) << __FUNCTION__ << ": returning kConfigChange, type "
+ << type_;
break;
}
break;
@@ -1045,33 +1089,39 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
// because they are associated with the seek.
status = DemuxerStream::kAborted;
buffer = NULL;
+ DVLOG(2) << __FUNCTION__ << ": returning kAborted, type " << type_;
break;
case SHUTDOWN:
status = DemuxerStream::kOk;
buffer = StreamParserBuffer::CreateEOSBuffer();
+ DVLOG(2) << __FUNCTION__ << ": returning kOk with EOS buffer, type "
+ << type_;
break;
}
base::ResetAndReturn(&read_cb_).Run(status, buffer);
}
-ChunkDemuxer::ChunkDemuxer(const base::Closure& open_cb,
- const NeedKeyCB& need_key_cb,
- const LogCB& log_cb,
- bool splice_frames_enabled)
+ChunkDemuxer::ChunkDemuxer(
+ const base::Closure& open_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
+ bool splice_frames_enabled)
: state_(WAITING_FOR_INIT),
cancel_next_seek_(false),
host_(NULL),
open_cb_(open_cb),
- need_key_cb_(need_key_cb),
+ encrypted_media_init_data_cb_(encrypted_media_init_data_cb),
enable_text_(false),
log_cb_(log_cb),
+ media_log_(media_log),
duration_(kNoTimestamp()),
user_specified_duration_(-1),
- liveness_(LIVENESS_UNKNOWN),
+ liveness_(DemuxerStream::LIVENESS_UNKNOWN),
splice_frames_enabled_(splice_frames_enabled) {
DCHECK(!open_cb_.is_null());
- DCHECK(!need_key_cb_.is_null());
+ DCHECK(!encrypted_media_init_data_cb_.is_null());
}
void ChunkDemuxer::Initialize(
@@ -1082,6 +1132,7 @@ void ChunkDemuxer::Initialize(
base::AutoLock auto_lock(lock_);
+ // The |init_cb_| must only be run after this method returns, so always post.
init_cb_ = BindToCurrentLoop(cb);
if (state_ == SHUTDOWN) {
base::ResetAndReturn(&init_cb_).Run(DEMUXER_ERROR_COULD_NOT_OPEN);
@@ -1152,10 +1203,6 @@ TimeDelta ChunkDemuxer::GetStartTime() const {
return TimeDelta();
}
-Demuxer::Liveness ChunkDemuxer::GetLiveness() const {
- return liveness_;
-}
-
void ChunkDemuxer::StartWaitingForSeek(TimeDelta seek_time) {
DVLOG(1) << "StartWaitingForSeek()";
base::AutoLock auto_lock(lock_);
@@ -1228,7 +1275,8 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
new SourceState(stream_parser.Pass(),
frame_processor.Pass(), log_cb_,
base::Bind(&ChunkDemuxer::CreateDemuxerStream,
- base::Unretained(this))));
+ base::Unretained(this)),
+ media_log_));
SourceState::NewTextTrackCB new_text_track_cb;
@@ -1239,10 +1287,7 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
source_state->Init(
base::Bind(&ChunkDemuxer::OnSourceInitDone, base::Unretained(this)),
- has_audio,
- has_video,
- need_key_cb_,
- new_text_track_cb);
+ has_audio, has_video, encrypted_media_init_data_cb_, new_text_track_cb);
source_state_map_[id] = source_state.release();
return kOk;
@@ -1303,18 +1348,7 @@ void ChunkDemuxer::AppendData(
switch (state_) {
case INITIALIZING:
- DCHECK(IsValidId(id));
- if (!source_state_map_[id]->Append(data, length,
- append_window_start,
- append_window_end,
- timestamp_offset,
- init_segment_received_cb)) {
- ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
- return;
- }
- break;
-
- case INITIALIZED: {
+ case INITIALIZED:
DCHECK(IsValidId(id));
if (!source_state_map_[id]->Append(data, length,
append_window_start,
@@ -1324,7 +1358,7 @@ void ChunkDemuxer::AppendData(
ReportError_Locked(PIPELINE_ERROR_DECODE);
return;
}
- } break;
+ break;
case PARSE_ERROR:
DVLOG(1) << "AppendData(): Ignoring data after a parse error.";
@@ -1610,13 +1644,11 @@ bool ChunkDemuxer::IsSeekWaitingForData_Locked() const {
}
void ChunkDemuxer::OnSourceInitDone(
- bool success,
const StreamParser::InitParameters& params) {
- DVLOG(1) << "OnSourceInitDone(" << success << ", "
- << params.duration.InSecondsF() << ")";
+ DVLOG(1) << "OnSourceInitDone(" << params.duration.InSecondsF() << ")";
lock_.AssertAcquired();
DCHECK_EQ(state_, INITIALIZING);
- if (!success || (!audio_ && !video_)) {
+ if (!audio_ && !video_) {
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
}
@@ -1627,7 +1659,7 @@ void ChunkDemuxer::OnSourceInitDone(
if (!params.timeline_offset.is_null()) {
if (!timeline_offset_.is_null() &&
params.timeline_offset != timeline_offset_) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< "Timeline offset is not the same across all SourceBuffers.";
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
@@ -1636,15 +1668,22 @@ void ChunkDemuxer::OnSourceInitDone(
timeline_offset_ = params.timeline_offset;
}
- if (params.liveness != LIVENESS_UNKNOWN) {
- if (liveness_ != LIVENESS_UNKNOWN && params.liveness != liveness_) {
- MEDIA_LOG(log_cb_)
+ if (params.liveness != DemuxerStream::LIVENESS_UNKNOWN) {
+ if (liveness_ != DemuxerStream::LIVENESS_UNKNOWN &&
+ params.liveness != liveness_) {
+ MEDIA_LOG(ERROR, log_cb_)
<< "Liveness is not the same across all SourceBuffers.";
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
}
- liveness_ = params.liveness;
+ if (liveness_ != params.liveness) {
+ liveness_ = params.liveness;
+ if (audio_)
+ audio_->SetLiveness(liveness_);
+ if (video_)
+ video_->SetLiveness(liveness_);
+ }
}
// Wait until all streams have initialized.
@@ -1670,19 +1709,19 @@ ChunkDemuxer::CreateDemuxerStream(DemuxerStream::Type type) {
case DemuxerStream::AUDIO:
if (audio_)
return NULL;
- audio_.reset(
- new ChunkDemuxerStream(DemuxerStream::AUDIO, splice_frames_enabled_));
+ audio_.reset(new ChunkDemuxerStream(DemuxerStream::AUDIO, liveness_,
+ splice_frames_enabled_));
return audio_.get();
break;
case DemuxerStream::VIDEO:
if (video_)
return NULL;
- video_.reset(
- new ChunkDemuxerStream(DemuxerStream::VIDEO, splice_frames_enabled_));
+ video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO, liveness_,
+ splice_frames_enabled_));
return video_.get();
break;
case DemuxerStream::TEXT: {
- return new ChunkDemuxerStream(DemuxerStream::TEXT,
+ return new ChunkDemuxerStream(DemuxerStream::TEXT, liveness_,
splice_frames_enabled_);
break;
}
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index 3c0520ffa72..1ef8d263755 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -14,6 +14,7 @@
#include "base/synchronization/lock.h"
#include "media/base/byte_queue.h"
#include "media/base/demuxer.h"
+#include "media/base/demuxer_stream.h"
#include "media/base/ranges.h"
#include "media/base/stream_parser.h"
#include "media/filters/source_buffer_stream.h"
@@ -27,7 +28,7 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
public:
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
- explicit ChunkDemuxerStream(Type type, bool splice_frames_enabled);
+ ChunkDemuxerStream(Type type, Liveness liveness, bool splice_frames_enabled);
~ChunkDemuxerStream() override;
// ChunkDemuxerStream control methods.
@@ -81,7 +82,8 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// DemuxerStream methods.
void Read(const ReadCB& read_cb) override;
- Type type() override;
+ Type type() const override;
+ Liveness liveness() const override;
AudioDecoderConfig audio_decoder_config() override;
VideoDecoderConfig video_decoder_config() override;
bool SupportsConfigChanges() override;
@@ -100,6 +102,8 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
return partial_append_window_trimming_enabled_;
}
+ void SetLiveness(Liveness liveness);
+
private:
enum State {
UNINITIALIZED,
@@ -116,6 +120,8 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// Specifies the type of the stream.
Type type_;
+ Liveness liveness_;
+
scoped_ptr<SourceBufferStream> stream_;
mutable base::Lock lock_;
@@ -141,18 +147,18 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// |open_cb| Run when Initialize() is called to signal that the demuxer
// is ready to receive media data via AppenData().
- // |need_key_cb| Run when the demuxer determines that an encryption key is
- // needed to decrypt the content.
+ // |encrypted_media_init_data_cb| Run when the demuxer determines that an
+ // encryption key is needed to decrypt the content.
// |enable_text| Process inband text tracks in the normal way when true,
// otherwise ignore them.
- // |log_cb| Run when parsing error messages need to be logged to the error
- // console.
+ // |log_cb| Run when the demuxer needs to emit MediaLog messages.
// |splice_frames_enabled| Indicates that it's okay to generate splice frames
// per the MSE specification. Renderers must understand DecoderBuffer's
// splice_timestamp() field.
ChunkDemuxer(const base::Closure& open_cb,
- const NeedKeyCB& need_key_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled);
~ChunkDemuxer() override;
@@ -165,7 +171,6 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
base::Time GetTimelineOffset() const override;
DemuxerStream* GetStream(DemuxerStream::Type type) override;
base::TimeDelta GetStartTime() const override;
- Liveness GetLiveness() const override;
// Methods used by an external object to control this demuxer.
//
@@ -303,8 +308,7 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
bool CanEndOfStream_Locked() const;
// SourceState callbacks.
- void OnSourceInitDone(bool success,
- const StreamParser::InitParameters& params);
+ void OnSourceInitDone(const StreamParser::InitParameters& params);
// Creates a DemuxerStream for the specified |type|.
// Returns a new ChunkDemuxerStream instance if a stream of this type
@@ -353,11 +357,12 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
DemuxerHost* host_;
base::Closure open_cb_;
- NeedKeyCB need_key_cb_;
+ EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
bool enable_text_;
- // Callback used to report error strings that can help the web developer
+ // Callback used to report log messages that can help the web developer
// figure out what is wrong with the content.
LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
PipelineStatusCB init_cb_;
// Callback to execute upon seek completion.
@@ -379,7 +384,7 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
double user_specified_duration_;
base::Time timeline_offset_;
- Liveness liveness_;
+ DemuxerStream::Liveness liveness_;
typedef std::map<std::string, SourceState*> SourceStateMap;
SourceStateMap source_state_map_;
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index cc3ab630c75..16ddff5b505 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -12,13 +12,13 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
+#include "media/base/media_log.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
#include "media/filters/chunk_demuxer.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
-#include "media/formats/webm/webm_crypto_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
@@ -37,7 +37,7 @@ const uint8 kTracksHeader[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
};
-// WebM Block bytes that represent a VP8 keyframe.
+// WebM Block bytes that represent a VP8 key frame.
const uint8 kVP8Keyframe[] = {
0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
};
@@ -45,11 +45,16 @@ const uint8 kVP8Keyframe[] = {
// WebM Block bytes that represent a VP8 interframe.
const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
-static const uint8 kCuesHeader[] = {
+const uint8 kCuesHeader[] = {
0x1C, 0x53, 0xBB, 0x6B, // Cues ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
};
+const uint8 kEncryptedMediaInitData[] = {
+ 0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
+ 0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
+};
+
const int kTracksHeaderSize = sizeof(kTracksHeader);
const int kTracksSizeOffset = 4;
@@ -136,8 +141,6 @@ static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
*called = true;
}
-static void LogFunc(const std::string& str) { DVLOG(1) << str; }
-
class ChunkDemuxerTest : public ::testing::Test {
protected:
enum CodecsIndex {
@@ -171,10 +174,11 @@ class ChunkDemuxerTest : public ::testing::Test {
void CreateNewDemuxer() {
base::Closure open_cb =
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
- Demuxer::NeedKeyCB need_key_cb =
- base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
- demuxer_.reset(
- new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
+ Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
+ &ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
+ demuxer_.reset(new ChunkDemuxer(
+ open_cb, encrypted_media_init_data_cb, base::Bind(&AddLogEntryForTest),
+ scoped_refptr<MediaLog>(new MediaLog()), true));
}
virtual ~ChunkDemuxerTest() {
@@ -429,9 +433,9 @@ class ChunkDemuxerTest : public ::testing::Test {
// |block_descriptions| - A space delimited string of block info that
// is used to populate |blocks|. Each block info has a timestamp in
// milliseconds and optionally followed by a 'K' to indicate that a block
- // should be marked as a keyframe. For example "0K 30 60" should populate
- // |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
- // non-keyframes at 30ms and 60ms.
+ // should be marked as a key frame. For example "0K 30 60" should populate
+ // |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
+ // non-key-frames at 30ms and 60ms.
void ParseBlockDescriptions(int track_number,
const std::string block_descriptions,
std::vector<BlockInfo>* blocks) {
@@ -457,8 +461,8 @@ class ChunkDemuxerTest : public ::testing::Test {
block_info.duration = kTextBlockDuration;
ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
<< "Text block with timestamp " << block_info.timestamp_in_ms
- << " was not marked as a keyframe."
- << " All text blocks must be keyframes";
+ << " was not marked as a key frame."
+ << " All text blocks must be key frames";
}
if (track_number == kAudioTrackNum)
@@ -661,7 +665,7 @@ class ChunkDemuxerTest : public ::testing::Test {
int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
PipelineStatus expected_status =
- (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
+ (stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;
base::TimeDelta expected_duration = kNoTimestamp();
if (expected_status == PIPELINE_OK)
@@ -841,7 +845,7 @@ class ChunkDemuxerTest : public ::testing::Test {
int video_timecode = first_video_timecode;
// Create simple blocks for everything except the last 2 blocks.
- // The first video frame must be a keyframe.
+ // The first video frame must be a key frame.
uint8 video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
@@ -1074,6 +1078,9 @@ class ChunkDemuxerTest : public ::testing::Test {
ss << " ";
ss << buffer->timestamp().InMilliseconds();
+ if (buffer->is_key_frame())
+ ss << "K";
+
// Handle preroll buffers.
if (EndsWith(timestamps[i], "P", true)) {
ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
@@ -1150,17 +1157,9 @@ class ChunkDemuxerTest : public ::testing::Test {
}
MOCK_METHOD0(DemuxerOpened, void());
- // TODO(xhwang): This is a workaround of the issue that move-only parameters
- // are not supported in mocked methods. Remove this when the issue is fixed
- // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
- // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
- MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
- const uint8* init_data, int init_data_size));
- void DemuxerNeedKey(const std::string& type,
- const std::vector<uint8>& init_data) {
- const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
- NeedKeyMock(type, init_data_ptr, init_data.size());
- }
+ MOCK_METHOD2(OnEncryptedMediaInitData,
+ void(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data));
MOCK_METHOD0(InitSegmentReceived, void(void));
@@ -1221,8 +1220,12 @@ TEST_F(ChunkDemuxerTest, Init) {
if (is_audio_encrypted || is_video_encrypted) {
int need_key_count = (is_audio_encrypted ? 1 : 0) +
(is_video_encrypted ? 1 : 0);
- EXPECT_CALL(*this, NeedKeyMock(kWebMInitDataType, NotNull(),
- DecryptConfig::kDecryptionKeySize))
+ EXPECT_CALL(*this, OnEncryptedMediaInitData(
+ EmeInitDataType::WEBM,
+ std::vector<uint8>(
+ kEncryptedMediaInitData,
+ kEncryptedMediaInitData +
+ arraysize(kEncryptedMediaInitData))))
.Times(Exactly(need_key_count));
}
@@ -1384,15 +1387,15 @@ TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
CheckExpectedRanges(kSourceId, "{ [0,92) }");
- CheckExpectedBuffers(audio_stream, "0 23 46 69");
- CheckExpectedBuffers(video_stream, "0 30 60");
- CheckExpectedBuffers(text_stream, "10 45");
+ CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
+ CheckExpectedBuffers(video_stream, "0K 30 60K");
+ CheckExpectedBuffers(text_stream, "10K 45K");
ShutdownDemuxer();
}
TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
- // Tests that non-keyframes following an init segment are allowed
+ // Tests that non-key-frames following an init segment are allowed
// and dropped, as expected if the initialization segment received
// algorithm correctly sets the needs random access point flag to true for all
// track buffers. Note that the first initialization segment is insufficient
@@ -1422,9 +1425,9 @@ TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
MuxedStreamInfo(kTextTrackNum, "80K 90K"));
CheckExpectedRanges(kSourceId, "{ [23,92) }");
- CheckExpectedBuffers(audio_stream, "23 46 69");
- CheckExpectedBuffers(video_stream, "30 90");
- CheckExpectedBuffers(text_stream, "25 40 80 90");
+ CheckExpectedBuffers(audio_stream, "23K 46K 69K");
+ CheckExpectedBuffers(video_stream, "30K 90K");
+ CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
}
// Make sure that the demuxer reports an error if Shutdown()
@@ -1682,7 +1685,7 @@ TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
+ &host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
@@ -2003,6 +2006,11 @@ TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
kInfiniteDuration()));
+
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+ EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
}
TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
@@ -2117,7 +2125,7 @@ TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
- kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
+ kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
@@ -2133,7 +2141,7 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
- DEMUXER_ERROR_COULD_NOT_OPEN), true);
+ PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(1);
codecs[0] = "vorbis";
@@ -2147,7 +2155,7 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
- DEMUXER_ERROR_COULD_NOT_OPEN), true);
+ PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(1);
codecs[0] = "vp8";
@@ -3141,9 +3149,10 @@ TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
true, true, true, true, false,
};
- COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
- test_arrays_out_of_sync);
- COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
+ static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
+ "test arrays out of sync");
+ static_assert(arraysize(kBuffer) == sizeof(kBuffer),
+ "there should be one byte per index");
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
@@ -3409,13 +3418,13 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
// in the buffer. Also verify that buffers that start inside the
// window and extend beyond the end of the window are not included.
CheckExpectedRanges(kSourceId, "{ [120,270) }");
- CheckExpectedBuffers(stream, "120 150 180 210 240");
+ CheckExpectedBuffers(stream, "120K 150 180 210 240K");
// Extend the append window to [50,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that adding buffers start at the next
- // keyframe.
+ // key frame.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
@@ -3444,7 +3453,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
// The "50P" buffer is the "0" buffer marked for complete discard. The next
// "50" buffer is the "30" buffer marked with 20ms of start discard.
- CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
+ CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");
// Extend the append window to [50,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
@@ -3491,7 +3500,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
- CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
+ CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
@@ -3533,7 +3542,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
Seek(duration_1);
ExpectConfigChanged(DemuxerStream::AUDIO);
ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
- CheckExpectedBuffers(stream, "2746 2767 2789 2810");
+ CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
@@ -3558,8 +3567,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
// in the buffer. Also verify that cues that extend beyond the
// window are not included.
CheckExpectedRanges(kSourceId, "{ [100,270) }");
- CheckExpectedBuffers(video_stream, "120 150 180 210 240");
- CheckExpectedBuffers(text_stream, "100");
+ CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
+ CheckExpectedBuffers(text_stream, "100K");
// Extend the append window to [20,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
@@ -3573,8 +3582,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
// Seek to the new range and verify that the expected buffers are returned.
Seek(base::TimeDelta::FromMilliseconds(420));
- CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
- CheckExpectedBuffers(text_stream, "400 500");
+ CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
+ CheckExpectedBuffers(text_stream, "400K 500K");
}
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
@@ -3599,9 +3608,9 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
- CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
- CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
- CheckExpectedBuffers(text_stream, "0 100 200");
+ CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
+ CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
+ CheckExpectedBuffers(text_stream, "0K 100K 200K");
// Remove the buffers that were added.
demuxer_->Remove(kSourceId, base::TimeDelta(),
@@ -3618,9 +3627,9 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
Seek(base::TimeDelta());
- CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
- CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
- CheckExpectedBuffers(text_stream, "1 101 201");
+ CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
+ CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
+ CheckExpectedBuffers(text_stream, "1K 101K 201K");
}
TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
@@ -3639,7 +3648,7 @@ TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
"0K 20K 40K 60K 80K 100K 120K 140K");
CheckExpectedRanges(kSourceId, "{ [0,160) }");
- CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+ CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
demuxer_->Remove(kSourceId,
base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
@@ -3647,7 +3656,7 @@ TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
Seek(base::TimeDelta());
CheckExpectedRanges(kSourceId, "{ [0,160) }");
- CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+ CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
}
// Verifies that a Seek() will complete without text cues for
@@ -3687,8 +3696,8 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
EXPECT_FALSE(text_read_done);
// Read some audio & video buffers to further verify seek completion.
- CheckExpectedBuffers(audio_stream, "120 140");
- CheckExpectedBuffers(video_stream, "120 150");
+ CheckExpectedBuffers(audio_stream, "120K 140K");
+ CheckExpectedBuffers(video_stream, "120K 150");
EXPECT_FALSE(text_read_done);
@@ -3704,10 +3713,10 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
// NOTE: we start at 275 here because the buffer at 225 was returned
// to the pending read initiated above.
- CheckExpectedBuffers(text_stream, "275 325");
+ CheckExpectedBuffers(text_stream, "275K 325K");
// Verify that audio & video streams continue to return expected values.
- CheckExpectedBuffers(audio_stream, "160 180");
+ CheckExpectedBuffers(audio_stream, "160K 180K");
CheckExpectedBuffers(video_stream, "180 210");
}
diff --git a/chromium/media/filters/clockless_video_frame_scheduler.cc b/chromium/media/filters/clockless_video_frame_scheduler.cc
deleted file mode 100644
index b37d4307763..00000000000
--- a/chromium/media/filters/clockless_video_frame_scheduler.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/clockless_video_frame_scheduler.h"
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-ClocklessVideoFrameScheduler::ClocklessVideoFrameScheduler(
- const DisplayCB& display_cb)
- : display_cb_(display_cb) {
-}
-
-ClocklessVideoFrameScheduler::~ClocklessVideoFrameScheduler() {
-}
-
-void ClocklessVideoFrameScheduler::ScheduleVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks /* wall_ticks */,
- const DoneCB& done_cb) {
- display_cb_.Run(frame);
- base::MessageLoopProxy::current()->PostTask(
- FROM_HERE, base::Bind(done_cb, frame, DISPLAYED));
-}
-
-void ClocklessVideoFrameScheduler::Reset() {
-}
-
-} // namespace media
diff --git a/chromium/media/filters/clockless_video_frame_scheduler.h b/chromium/media/filters/clockless_video_frame_scheduler.h
deleted file mode 100644
index b13ffc2f74b..00000000000
--- a/chromium/media/filters/clockless_video_frame_scheduler.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
-#define MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
-
-#include "media/filters/video_frame_scheduler.h"
-
-namespace media {
-
-// A scheduler that immediately displays frames.
-class ClocklessVideoFrameScheduler : public VideoFrameScheduler {
- public:
- typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> DisplayCB;
-
- explicit ClocklessVideoFrameScheduler(const DisplayCB& display_cb);
- ~ClocklessVideoFrameScheduler() override;
-
- // VideoFrameScheduler implementation.
- void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) override;
- void Reset() override;
-
- private:
- DisplayCB display_cb_;
-
- DISALLOW_COPY_AND_ASSIGN(ClocklessVideoFrameScheduler);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
diff --git a/chromium/media/filters/context_3d.h b/chromium/media/filters/context_3d.h
new file mode 100644
index 00000000000..ccb0c7c962a
--- /dev/null
+++ b/chromium/media/filters/context_3d.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_CONTEXT_3D_H_
+#define MEDIA_FILTERS_CONTEXT_3D_H_
+
+class GrContext;
+
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+}
+}
+
+namespace media {
+
+// This struct can be used to make media use gpu::gles2::GLES2Interface and
+// GrContext.
+// Usage:
+// gpu::gles2::GLES2Interface* gl = ...;
+// GrContext* gr_context = ...;
+// Context3D context_3d(gl, gr_context);
+
+struct Context3D {
+ Context3D() : gl(nullptr), gr_context(nullptr) {}
+ Context3D(gpu::gles2::GLES2Interface* gl_, class GrContext* gr_context_)
+ : gl(gl_), gr_context(gr_context_) {}
+
+ gpu::gles2::GLES2Interface* gl;
+ class GrContext* gr_context;
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_CONTEXT_3D_H_
diff --git a/chromium/media/filters/decoder_selector.cc b/chromium/media/filters/decoder_selector.cc
index 7214b338be6..555a411bd76 100644
--- a/chromium/media/filters/decoder_selector.cc
+++ b/chromium/media/filters/decoder_selector.cc
@@ -51,13 +51,12 @@ static bool IsStreamEncrypted(DemuxerStream* stream) {
template <DemuxerStream::Type StreamType>
DecoderSelector<StreamType>::DecoderSelector(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ ScopedVector<Decoder> decoders)
: task_runner_(task_runner),
decoders_(decoders.Pass()),
- set_decryptor_ready_cb_(set_decryptor_ready_cb),
input_stream_(NULL),
- weak_ptr_factory_(this) {}
+ weak_ptr_factory_(this) {
+}
template <DemuxerStream::Type StreamType>
DecoderSelector<StreamType>::~DecoderSelector() {
@@ -74,12 +73,17 @@ DecoderSelector<StreamType>::~DecoderSelector() {
template <DemuxerStream::Type StreamType>
void DecoderSelector<StreamType>::SelectDecoder(
DemuxerStream* stream,
- bool low_delay,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
const SelectDecoderCB& select_decoder_cb,
- const typename Decoder::OutputCB& output_cb) {
+ const typename Decoder::OutputCB& output_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stream);
+ DCHECK(select_decoder_cb_.is_null());
+
+ set_decryptor_ready_cb_ = set_decryptor_ready_cb;
+ waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
// Make sure |select_decoder_cb| runs on a different execution stack.
select_decoder_cb_ = BindToCurrentLoop(select_decoder_cb);
@@ -91,7 +95,6 @@ void DecoderSelector<StreamType>::SelectDecoder(
}
input_stream_ = stream;
- low_delay_ = low_delay;
output_cb_ = output_cb;
if (!IsStreamEncrypted(input_stream_)) {
@@ -99,19 +102,17 @@ void DecoderSelector<StreamType>::SelectDecoder(
return;
}
- // This could happen if Encrypted Media Extension (EME) is not enabled.
+ // This could be null if Encrypted Media Extension (EME) is not enabled.
if (set_decryptor_ready_cb_.is_null()) {
ReturnNullDecoder();
return;
}
decoder_.reset(new typename StreamTraits::DecryptingDecoderType(
- task_runner_, set_decryptor_ready_cb_));
+ task_runner_, set_decryptor_ready_cb_, waiting_for_decryption_key_cb_));
- DecoderStreamTraits<StreamType>::Initialize(
- decoder_.get(),
- StreamTraits::GetDecoderConfig(*input_stream_),
- low_delay_,
+ DecoderStreamTraits<StreamType>::InitializeDecoder(
+ decoder_.get(), input_stream_,
base::Bind(&DecoderSelector<StreamType>::DecryptingDecoderInitDone,
weak_ptr_factory_.GetWeakPtr()),
output_cb_);
@@ -131,8 +132,8 @@ void DecoderSelector<StreamType>::DecryptingDecoderInitDone(
decoder_.reset();
- decrypted_stream_.reset(
- new DecryptingDemuxerStream(task_runner_, set_decryptor_ready_cb_));
+ decrypted_stream_.reset(new DecryptingDemuxerStream(
+ task_runner_, set_decryptor_ready_cb_, waiting_for_decryption_key_cb_));
decrypted_stream_->Initialize(
input_stream_,
@@ -170,10 +171,8 @@ void DecoderSelector<StreamType>::InitializeDecoder() {
decoder_.reset(decoders_.front());
decoders_.weak_erase(decoders_.begin());
- DecoderStreamTraits<StreamType>::Initialize(
- decoder_.get(),
- StreamTraits::GetDecoderConfig(*input_stream_),
- low_delay_,
+ DecoderStreamTraits<StreamType>::InitializeDecoder(
+ decoder_.get(), input_stream_,
base::Bind(&DecoderSelector<StreamType>::DecoderInitDone,
weak_ptr_factory_.GetWeakPtr()),
output_cb_);
diff --git a/chromium/media/filters/decoder_selector.h b/chromium/media/filters/decoder_selector.h
index c50f9fba718..59c90f57644 100644
--- a/chromium/media/filters/decoder_selector.h
+++ b/chromium/media/filters/decoder_selector.h
@@ -50,25 +50,28 @@ class MEDIA_EXPORT DecoderSelector {
SelectDecoderCB;
// |decoders| contains the Decoders to use when initializing.
- //
- // |set_decryptor_ready_cb| is optional. If |set_decryptor_ready_cb| is null,
- // no decryptor will be available to perform decryption.
DecoderSelector(
const scoped_refptr<base::SingleThreadTaskRunner>& message_loop,
- ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ ScopedVector<Decoder> decoders);
// Aborts pending Decoder selection and fires |select_decoder_cb| with
// NULL and NULL immediately if it's pending.
~DecoderSelector();
- // Initializes and selects a Decoder that can decode the |stream|.
- // Selected Decoder (and DecryptingDemuxerStream) is returned via
+ // Initializes and selects the first Decoder that can decode the |stream|.
+ // The selected Decoder (and DecryptingDemuxerStream) is returned via
// the |select_decoder_cb|.
+ // Notes:
+ // 1. This must not be called again before |select_decoder_cb| is run.
+ // 2. Decoders that fail to initialize will be deleted. Future calls will
+ // select from the decoders following the decoder that was last returned.
+ // 3. |set_decryptor_ready_cb| is optional. If |set_decryptor_ready_cb| is
+ // null, no decryptor will be available to perform decryption.
void SelectDecoder(DemuxerStream* stream,
- bool low_delay,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
const SelectDecoderCB& select_decoder_cb,
- const typename Decoder::OutputCB& output_cb);
+ const typename Decoder::OutputCB& output_cb,
+ const base::Closure& waiting_for_decryption_key_cb);
private:
void DecryptingDecoderInitDone(PipelineStatus status);
@@ -79,12 +82,12 @@ class MEDIA_EXPORT DecoderSelector {
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
ScopedVector<Decoder> decoders_;
- SetDecryptorReadyCB set_decryptor_ready_cb_;
DemuxerStream* input_stream_;
- bool low_delay_;
+ SetDecryptorReadyCB set_decryptor_ready_cb_;
SelectDecoderCB select_decoder_cb_;
typename Decoder::OutputCB output_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
scoped_ptr<Decoder> decoder_;
scoped_ptr<DecryptingDemuxerStream> decrypted_stream_;
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
index b317a013d41..9e387cb9298 100644
--- a/chromium/media/filters/decoder_stream.cc
+++ b/chromium/media/filters/decoder_stream.cc
@@ -6,14 +6,12 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
-#include "media/base/audio_decoder.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/demuxer_stream.h"
#include "media/base/video_decoder.h"
#include "media/filters/decrypting_demuxer_stream.h"
@@ -42,21 +40,18 @@ template <DemuxerStream::Type StreamType>
DecoderStream<StreamType>::DecoderStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
media_log_(media_log),
state_(STATE_UNINITIALIZED),
stream_(NULL),
- low_delay_(false),
decoder_selector_(
- new DecoderSelector<StreamType>(task_runner,
- decoders.Pass(),
- set_decryptor_ready_cb)),
+ new DecoderSelector<StreamType>(task_runner, decoders.Pass())),
active_splice_(false),
decoding_eos_(false),
pending_decode_requests_(0),
- weak_factory_(this) {}
+ weak_factory_(this) {
+}
template <DemuxerStream::Type StreamType>
DecoderStream<StreamType>::~DecoderStream() {
@@ -82,29 +77,25 @@ DecoderStream<StreamType>::~DecoderStream() {
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::Initialize(DemuxerStream* stream,
- bool low_delay,
- const StatisticsCB& statistics_cb,
- const InitCB& init_cb) {
+void DecoderStream<StreamType>::Initialize(
+ DemuxerStream* stream,
+ const InitCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
+ DCHECK_EQ(state_, STATE_UNINITIALIZED);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
statistics_cb_ = statistics_cb;
init_cb_ = init_cb;
+ waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
stream_ = stream;
- low_delay_ = low_delay;
state_ = STATE_INITIALIZING;
- // TODO(xhwang): DecoderSelector only needs a config to select a decoder.
- decoder_selector_->SelectDecoder(
- stream, low_delay,
- base::Bind(&DecoderStream<StreamType>::OnDecoderSelected,
- weak_factory_.GetWeakPtr()),
- base::Bind(&DecoderStream<StreamType>::OnDecodeOutputReady,
- weak_factory_.GetWeakPtr()));
+ SelectDecoder(set_decryptor_ready_cb);
}
template <DemuxerStream::Type StreamType>
@@ -146,7 +137,7 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::Reset(const base::Closure& closure) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED)<< state_;
+ DCHECK_NE(state_, STATE_UNINITIALIZED);
DCHECK(reset_cb_.is_null());
reset_cb_ = closure;
@@ -214,6 +205,18 @@ bool DecoderStream<StreamType>::CanDecodeMore() const {
}
template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::SelectDecoder(
+ const SetDecryptorReadyCB& set_decryptor_ready_cb) {
+ decoder_selector_->SelectDecoder(
+ stream_, set_decryptor_ready_cb,
+ base::Bind(&DecoderStream<StreamType>::OnDecoderSelected,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&DecoderStream<StreamType>::OnDecodeOutputReady,
+ weak_factory_.GetWeakPtr()),
+ waiting_for_decryption_key_cb_);
+}
+
+template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnDecoderSelected(
scoped_ptr<Decoder> selected_decoder,
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
@@ -221,31 +224,46 @@ void DecoderStream<StreamType>::OnDecoderSelected(
<< (selected_decoder ? selected_decoder->GetDisplayName()
: "No decoder selected.");
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
- DCHECK(!init_cb_.is_null());
- DCHECK(read_cb_.is_null());
- DCHECK(reset_cb_.is_null());
+ DCHECK(state_ == STATE_INITIALIZING || state_ == STATE_REINITIALIZING_DECODER)
+ << state_;
+ if (state_ == STATE_INITIALIZING) {
+ DCHECK(!init_cb_.is_null());
+ DCHECK(read_cb_.is_null());
+ DCHECK(reset_cb_.is_null());
+ } else {
+ DCHECK(decoder_);
+ }
- decoder_selector_.reset();
- if (decrypting_demuxer_stream)
- stream_ = decrypting_demuxer_stream.get();
+ previous_decoder_ = decoder_.Pass();
+ decoder_ = selected_decoder.Pass();
+ if (decrypting_demuxer_stream) {
+ decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
+ stream_ = decrypting_demuxer_stream_.get();
+ }
- if (!selected_decoder) {
- state_ = STATE_UNINITIALIZED;
- base::ResetAndReturn(&init_cb_).Run(false);
+ if (!decoder_) {
+ if (state_ == STATE_INITIALIZING) {
+ state_ = STATE_UNINITIALIZED;
+ base::ResetAndReturn(&init_cb_).Run(false);
+ } else {
+ CompleteDecoderReinitialization(false);
+ }
return;
}
- state_ = STATE_NORMAL;
- decoder_ = selected_decoder.Pass();
- decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
-
const std::string stream_type = DecoderStreamTraits<StreamType>::ToString();
- media_log_->SetBooleanProperty((stream_type + "_dds").c_str(),
+ media_log_->SetBooleanProperty(stream_type + "_dds",
decrypting_demuxer_stream_);
- media_log_->SetStringProperty((stream_type + "_decoder").c_str(),
+ media_log_->SetStringProperty(stream_type + "_decoder",
decoder_->GetDisplayName());
+ if (state_ == STATE_REINITIALIZING_DECODER) {
+ CompleteDecoderReinitialization(true);
+ return;
+ }
+
+ // Initialization succeeded.
+ state_ = STATE_NORMAL;
if (StreamTraits::NeedsBitstreamConversion(decoder_.get()))
stream_->EnableBitstreamConverter();
base::ResetAndReturn(&init_cb_).Run(true);
@@ -327,8 +345,7 @@ void DecoderStream<StreamType>::OnDecodeDone(int buffer_size,
return;
case Decoder::kAborted:
- // Decoder can return kAborted only when Reset is pending.
- NOTREACHED();
+ // Decoder can return kAborted during Reset() or during destruction.
return;
case Decoder::kOk:
@@ -375,24 +392,22 @@ void DecoderStream<StreamType>::OnDecodeOutputReady(
if (!reset_cb_.is_null())
return;
- // TODO(xhwang): VideoDecoder doesn't need to return EOS after it's flushed.
- // Fix all decoders and remove this block.
- // Store decoded output.
- ready_outputs_.push_back(output);
-
- if (read_cb_.is_null())
+ if (!read_cb_.is_null()) {
+ // If |ready_outputs_| was non-empty, the read would have already been
+ // satisifed by Read().
+ DCHECK(ready_outputs_.empty());
+ SatisfyRead(OK, output);
return;
+ }
- // Satisfy outstanding read request, if any.
- scoped_refptr<Output> read_result = ready_outputs_.front();
- ready_outputs_.pop_front();
- SatisfyRead(OK, output);
+ // Store decoded output.
+ ready_outputs_.push_back(output);
}
template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::ReadFromDemuxerStream() {
FUNCTION_DVLOG(2);
- DCHECK_EQ(state_, STATE_NORMAL) << state_;
+ DCHECK_EQ(state_, STATE_NORMAL);
DCHECK(CanDecodeMore());
DCHECK(reset_cb_.is_null());
@@ -477,15 +492,12 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::ReinitializeDecoder() {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_FLUSHING_DECODER) << state_;
+ DCHECK_EQ(state_, STATE_FLUSHING_DECODER);
DCHECK_EQ(pending_decode_requests_, 0);
- DCHECK(StreamTraits::GetDecoderConfig(*stream_).IsValidConfig());
state_ = STATE_REINITIALIZING_DECODER;
- DecoderStreamTraits<StreamType>::Initialize(
- decoder_.get(),
- StreamTraits::GetDecoderConfig(*stream_),
- low_delay_,
+ DecoderStreamTraits<StreamType>::InitializeDecoder(
+ decoder_.get(), stream_,
base::Bind(&DecoderStream<StreamType>::OnDecoderReinitialized,
weak_factory_.GetWeakPtr()),
base::Bind(&DecoderStream<StreamType>::OnDecodeOutputReady,
@@ -496,7 +508,7 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnDecoderReinitialized(PipelineStatus status) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
+ DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER);
// ReinitializeDecoder() can be called in two cases:
// 1, Flushing decoder finished (see OnDecodeOutputReady()).
@@ -504,7 +516,25 @@ void DecoderStream<StreamType>::OnDecoderReinitialized(PipelineStatus status) {
// Also, Reset() can be called during pending ReinitializeDecoder().
// This function needs to handle them all!
- state_ = (status == PIPELINE_OK) ? STATE_NORMAL : STATE_ERROR;
+ if (status != PIPELINE_OK) {
+ // Reinitialization failed. Try to fall back to one of the remaining
+ // decoders. This will consume at least one decoder so doing it more than
+ // once is safe.
+ // For simplicity, don't attempt to fall back to a decryptor. Calling this
+ // with a null callback ensures that one won't be selected.
+ SelectDecoder(SetDecryptorReadyCB());
+ } else {
+ CompleteDecoderReinitialization(true);
+ }
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::CompleteDecoderReinitialization(bool success) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER);
+
+ state_ = success ? STATE_NORMAL : STATE_ERROR;
if (!reset_cb_.is_null()) {
base::ResetAndReturn(&reset_cb_).Run();
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
index 28587c573a9..3c0e23ef5fb 100644
--- a/chromium/media/filters/decoder_stream.h
+++ b/chromium/media/filters/decoder_stream.h
@@ -53,16 +53,16 @@ class MEDIA_EXPORT DecoderStream {
DecoderStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
const scoped_refptr<MediaLog>& media_log);
virtual ~DecoderStream();
// Initializes the DecoderStream and returns the initialization result
// through |init_cb|. Note that |init_cb| is always called asynchronously.
void Initialize(DemuxerStream* stream,
- bool low_delay,
+ const InitCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
const StatisticsCB& statistics_cb,
- const InitCB& init_cb);
+ const base::Closure& waiting_for_decryption_key_cb);
// Reads a decoded Output and returns it via the |read_cb|. Note that
// |read_cb| is always called asynchronously. This method should only be
@@ -100,7 +100,7 @@ class MEDIA_EXPORT DecoderStream {
}
// Allows callers to register for notification of config changes; this is
- // called immediately after recieving the 'kConfigChanged' status from the
+ // called immediately after receiving the 'kConfigChanged' status from the
// DemuxerStream, before any action is taken to handle the config change.
typedef base::Closure ConfigChangeObserverCB;
void set_config_change_observer(
@@ -120,6 +120,8 @@ class MEDIA_EXPORT DecoderStream {
STATE_ERROR
};
+ void SelectDecoder(const SetDecryptorReadyCB& set_decryptor_ready_cb);
+
// Called when |decoder_selector| selected the |selected_decoder|.
// |decrypting_demuxer_stream| was also populated if a DecryptingDemuxerStream
// is created to help decrypt the encrypted stream.
@@ -156,6 +158,8 @@ class MEDIA_EXPORT DecoderStream {
// Callback for Decoder reinitialization.
void OnDecoderReinitialized(PipelineStatus status);
+ void CompleteDecoderReinitialization(bool success);
+
void ResetDecoder();
void OnDecoderReset();
@@ -167,17 +171,20 @@ class MEDIA_EXPORT DecoderStream {
StatisticsCB statistics_cb_;
InitCB init_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
ReadCB read_cb_;
base::Closure reset_cb_;
DemuxerStream* stream_;
- bool low_delay_;
scoped_ptr<DecoderSelector<StreamType> > decoder_selector_;
- // These two will be set by DecoderSelector::SelectDecoder().
scoped_ptr<Decoder> decoder_;
+ // TODO(watk): When falling back from H/W decoding to S/W decoding,
+ // destructing the GpuVideoDecoder too early results in black frames being
+ // displayed. |previous_decoder_| is used to keep it alive.
+ scoped_ptr<Decoder> previous_decoder_;
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream_;
SpliceObserverCB splice_observer_cb_;
diff --git a/chromium/media/filters/decoder_stream_traits.cc b/chromium/media/filters/decoder_stream_traits.cc
index 1c654c3b08f..de8f86009b7 100644
--- a/chromium/media/filters/decoder_stream_traits.cc
+++ b/chromium/media/filters/decoder_stream_traits.cc
@@ -18,13 +18,13 @@ std::string DecoderStreamTraits<DemuxerStream::AUDIO>::ToString() {
return "audio";
}
-void DecoderStreamTraits<DemuxerStream::AUDIO>::Initialize(
+void DecoderStreamTraits<DemuxerStream::AUDIO>::InitializeDecoder(
DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
+ DemuxerStream* stream,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) {
- decoder->Initialize(config, status_cb, output_cb);
+ DCHECK(stream->audio_decoder_config().IsValidConfig());
+ decoder->Initialize(stream->audio_decoder_config(), status_cb, output_cb);
}
void DecoderStreamTraits<DemuxerStream::AUDIO>::ReportStatistics(
@@ -35,12 +35,6 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::ReportStatistics(
statistics_cb.Run(statistics);
}
-DecoderStreamTraits<DemuxerStream::AUDIO>::DecoderConfigType
- DecoderStreamTraits<DemuxerStream::AUDIO>::GetDecoderConfig(
- DemuxerStream& stream) {
- return stream.audio_decoder_config();
-}
-
scoped_refptr<DecoderStreamTraits<DemuxerStream::AUDIO>::OutputType>
DecoderStreamTraits<DemuxerStream::AUDIO>::CreateEOSOutput() {
return OutputType::CreateEOSBuffer();
@@ -50,13 +44,15 @@ std::string DecoderStreamTraits<DemuxerStream::VIDEO>::ToString() {
return "video";
}
-void DecoderStreamTraits<DemuxerStream::VIDEO>::Initialize(
+void DecoderStreamTraits<DemuxerStream::VIDEO>::InitializeDecoder(
DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
+ DemuxerStream* stream,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) {
- decoder->Initialize(config, low_delay, status_cb, output_cb);
+ DCHECK(stream->video_decoder_config().IsValidConfig());
+ decoder->Initialize(stream->video_decoder_config(),
+ stream->liveness() == DemuxerStream::LIVENESS_LIVE,
+ status_cb, output_cb);
}
bool DecoderStreamTraits<DemuxerStream::VIDEO>::NeedsBitstreamConversion(
@@ -72,12 +68,6 @@ void DecoderStreamTraits<DemuxerStream::VIDEO>::ReportStatistics(
statistics_cb.Run(statistics);
}
-DecoderStreamTraits<DemuxerStream::VIDEO>::DecoderConfigType
- DecoderStreamTraits<DemuxerStream::VIDEO>::GetDecoderConfig(
- DemuxerStream& stream) {
- return stream.video_decoder_config();
-}
-
scoped_refptr<DecoderStreamTraits<DemuxerStream::VIDEO>::OutputType>
DecoderStreamTraits<DemuxerStream::VIDEO>::CreateEOSOutput() {
return OutputType::CreateEOSFrame();
diff --git a/chromium/media/filters/decoder_stream_traits.h b/chromium/media/filters/decoder_stream_traits.h
index c995962b444..707ff1ca27f 100644
--- a/chromium/media/filters/decoder_stream_traits.h
+++ b/chromium/media/filters/decoder_stream_traits.h
@@ -25,21 +25,18 @@ template <>
struct DecoderStreamTraits<DemuxerStream::AUDIO> {
typedef AudioBuffer OutputType;
typedef AudioDecoder DecoderType;
- typedef AudioDecoderConfig DecoderConfigType;
typedef DecryptingAudioDecoder DecryptingDecoderType;
typedef base::Callback<void(bool success)> StreamInitCB;
typedef base::Callback<void(const scoped_refptr<OutputType>&)> OutputCB;
static std::string ToString();
- static void Initialize(DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
- const PipelineStatusCB& status_cb,
- const OutputCB& output_cb);
+ static void InitializeDecoder(DecoderType* decoder,
+ DemuxerStream* stream,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb);
static bool NeedsBitstreamConversion(DecoderType* decoder) { return false; }
static void ReportStatistics(const StatisticsCB& statistics_cb,
int bytes_decoded);
- static DecoderConfigType GetDecoderConfig(DemuxerStream& stream);
static scoped_refptr<OutputType> CreateEOSOutput();
};
@@ -47,21 +44,18 @@ template <>
struct DecoderStreamTraits<DemuxerStream::VIDEO> {
typedef VideoFrame OutputType;
typedef VideoDecoder DecoderType;
- typedef VideoDecoderConfig DecoderConfigType;
typedef DecryptingVideoDecoder DecryptingDecoderType;
typedef base::Callback<void(bool success)> StreamInitCB;
typedef base::Callback<void(const scoped_refptr<OutputType>&)> OutputCB;
static std::string ToString();
- static void Initialize(DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
- const PipelineStatusCB& status_cb,
- const OutputCB& output_cb);
+ static void InitializeDecoder(DecoderType* decoder,
+ DemuxerStream* stream,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb);
static bool NeedsBitstreamConversion(DecoderType* decoder);
static void ReportStatistics(const StatisticsCB& statistics_cb,
int bytes_decoded);
- static DecoderConfigType GetDecoderConfig(DemuxerStream& stream);
static scoped_refptr<OutputType> CreateEOSOutput();
};
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index c5494f04f1c..7c8f4b20ec6 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -17,14 +17,10 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/decryptor.h"
-#include "media/base/demuxer_stream.h"
#include "media/base/pipeline.h"
namespace media {
-const int DecryptingAudioDecoder::kSupportedBitsPerChannel = 16;
-
static inline bool IsOutOfSync(const base::TimeDelta& timestamp_1,
const base::TimeDelta& timestamp_2) {
// Out of sync of 100ms would be pretty noticeable and we should keep any
@@ -36,13 +32,16 @@ static inline bool IsOutOfSync(const base::TimeDelta& timestamp_1,
DecryptingAudioDecoder::DecryptingAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb)
: task_runner_(task_runner),
state_(kUninitialized),
+ waiting_for_decryption_key_cb_(waiting_for_decryption_key_cb),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decode_pending_(false),
- weak_factory_(this) {}
+ weak_factory_(this) {
+}
std::string DecryptingAudioDecoder::GetDisplayName() const {
return "DecryptingAudioDecoder";
@@ -248,7 +247,7 @@ void DecryptingAudioDecoder::DecodePendingBuffer() {
void DecryptingAudioDecoder::DeliverFrame(
int buffer_size,
Decryptor::Status status,
- const Decryptor::AudioBuffers& frames) {
+ const Decryptor::AudioFrames& frames) {
DVLOG(3) << "DeliverFrame() - status: " << status;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
@@ -290,6 +289,7 @@ void DecryptingAudioDecoder::DeliverFrame(
}
state_ = kWaitingForKey;
+ waiting_for_decryption_key_cb_.Run();
return;
}
@@ -340,8 +340,8 @@ void DecryptingAudioDecoder::DoReset() {
}
void DecryptingAudioDecoder::ProcessDecodedFrames(
- const Decryptor::AudioBuffers& frames) {
- for (Decryptor::AudioBuffers::const_iterator iter = frames.begin();
+ const Decryptor::AudioFrames& frames) {
+ for (Decryptor::AudioFrames::const_iterator iter = frames.begin();
iter != frames.end();
++iter) {
scoped_refptr<AudioBuffer> frame = *iter;
diff --git a/chromium/media/filters/decrypting_audio_decoder.h b/chromium/media/filters/decrypting_audio_decoder.h
index 5fdb6f84868..30b0a63926d 100644
--- a/chromium/media/filters/decrypting_audio_decoder.h
+++ b/chromium/media/filters/decrypting_audio_decoder.h
@@ -30,16 +30,10 @@ class Decryptor;
// that no locks are required for thread safety.
class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
public:
- // We do not currently have a way to let the Decryptor choose the output
- // audio sample format and notify us of its choice. Therefore, we require all
- // Decryptor implementations to decode audio into a fixed integer sample
- // format designated by kSupportedBitsPerChannel.
- // TODO(xhwang): Remove this restriction after http://crbug.com/169105 fixed.
- static const int kSupportedBitsPerChannel;
-
DecryptingAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb);
~DecryptingAudioDecoder() override;
// AudioDecoder implementation.
@@ -83,7 +77,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// Callback for Decryptor::DecryptAndDecodeAudio().
void DeliverFrame(int buffer_size,
Decryptor::Status status,
- const Decryptor::AudioBuffers& frames);
+ const Decryptor::AudioFrames& frames);
// Callback for the |decryptor_| to notify this object that a new key has been
// added.
@@ -93,7 +87,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
void DoReset();
// Sets timestamps for |frames| and then passes them to |output_cb_|.
- void ProcessDecodedFrames(const Decryptor::AudioBuffers& frames);
+ void ProcessDecodedFrames(const Decryptor::AudioFrames& frames);
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
@@ -103,6 +97,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
// The current decoder configuration.
AudioDecoderConfig config_;
@@ -124,9 +119,8 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
scoped_ptr<AudioTimestampHelper> timestamp_helper_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<DecryptingAudioDecoder> weak_factory_;
base::WeakPtr<DecryptingAudioDecoder> weak_this_;
+ base::WeakPtrFactory<DecryptingAudioDecoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(DecryptingAudioDecoder);
};
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index f6acc1bb607..1f8395bf53a 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -63,7 +63,9 @@ class DecryptingAudioDecoderTest : public testing::Test {
message_loop_.message_loop_proxy(),
base::Bind(
&DecryptingAudioDecoderTest::RequestDecryptorNotification,
- base::Unretained(this)))),
+ base::Unretained(this)),
+ base::Bind(&DecryptingAudioDecoderTest::OnWaitingForDecryptionKey,
+ base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
num_decrypt_and_decode_calls_(0),
num_frames_in_decryptor_(0),
@@ -155,13 +157,13 @@ class DecryptingAudioDecoderTest : public testing::Test {
if (num_decrypt_and_decode_calls_ <= kDecodingDelay ||
num_frames_in_decryptor_ == 0) {
- audio_decode_cb.Run(Decryptor::kNeedMoreData, Decryptor::AudioBuffers());
+ audio_decode_cb.Run(Decryptor::kNeedMoreData, Decryptor::AudioFrames());
return;
}
num_frames_in_decryptor_--;
audio_decode_cb.Run(Decryptor::kSuccess,
- Decryptor::AudioBuffers(1, decoded_frame_));
+ Decryptor::AudioFrames(1, decoded_frame_));
}
// Sets up expectations and actions to put DecryptingAudioDecoder in an
@@ -203,7 +205,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
void EnterWaitingForKeyState() {
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(encrypted_buffer_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kNoKey,
- Decryptor::AudioBuffers()));
+ Decryptor::AudioFrames()));
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey());
decoder_->Decode(encrypted_buffer_,
base::Bind(&DecryptingAudioDecoderTest::DecodeDone,
base::Unretained(this)));
@@ -213,7 +216,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
void AbortPendingAudioDecodeCB() {
if (!pending_audio_decode_cb_.is_null()) {
base::ResetAndReturn(&pending_audio_decode_cb_).Run(
- Decryptor::kSuccess, Decryptor::AudioBuffers());
+ Decryptor::kSuccess, Decryptor::AudioFrames());
}
}
@@ -252,6 +255,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
MOCK_METHOD1(DecryptorSet, void(bool));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+
base::MessageLoop message_loop_;
scoped_ptr<DecryptingAudioDecoder> decoder_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
@@ -268,7 +273,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
// Constant buffer/frames, to be used/returned by |decoder_| and |decryptor_|.
scoped_refptr<DecoderBuffer> encrypted_buffer_;
scoped_refptr<AudioBuffer> decoded_frame_;
- Decryptor::AudioBuffers decoded_frame_list_;
+ Decryptor::AudioFrames decoded_frame_list_;
private:
DISALLOW_COPY_AND_ASSIGN(DecryptingAudioDecoderTest);
@@ -325,7 +330,7 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_DecodeError) {
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kError,
- Decryptor::AudioBuffers()));
+ Decryptor::AudioFrames()));
DecodeAndExpect(encrypted_buffer_, AudioDecoder::kDecodeError);
}
@@ -413,7 +418,7 @@ TEST_F(DecryptingAudioDecoderTest, KeyAdded_DruingPendingDecode) {
// added.
key_added_cb_.Run();
base::ResetAndReturn(&pending_audio_decode_cb_).Run(
- Decryptor::kNoKey, Decryptor::AudioBuffers());
+ Decryptor::kNoKey, Decryptor::AudioFrames());
message_loop_.RunUntilIdle();
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index c4e6b847838..260190cd65f 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -9,13 +9,9 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
-#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/decryptor.h"
-#include "media/base/demuxer_stream.h"
#include "media/base/pipeline.h"
-#include "media/base/video_decoder_config.h"
namespace media {
@@ -30,14 +26,17 @@ static bool IsStreamValidAndEncrypted(DemuxerStream* stream) {
DecryptingDemuxerStream::DecryptingDemuxerStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb)
: task_runner_(task_runner),
state_(kUninitialized),
+ waiting_for_decryption_key_cb_(waiting_for_decryption_key_cb),
demuxer_stream_(NULL),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decrypt_pending_(false),
- weak_factory_(this) {}
+ weak_factory_(this) {
+}
void DecryptingDemuxerStream::Initialize(DemuxerStream* stream,
const PipelineStatusCB& status_cb) {
@@ -121,11 +120,16 @@ VideoDecoderConfig DecryptingDemuxerStream::video_decoder_config() {
return video_config_;
}
-DemuxerStream::Type DecryptingDemuxerStream::type() {
+DemuxerStream::Type DecryptingDemuxerStream::type() const {
DCHECK(state_ != kUninitialized && state_ != kDecryptorRequested) << state_;
return demuxer_stream_->type();
}
+DemuxerStream::Liveness DecryptingDemuxerStream::liveness() const {
+ DCHECK(state_ != kUninitialized && state_ != kDecryptorRequested) << state_;
+ return demuxer_stream_->liveness();
+}
+
void DecryptingDemuxerStream::EnableBitstreamConverter() {
demuxer_stream_->EnableBitstreamConverter();
}
@@ -244,6 +248,9 @@ void DecryptingDemuxerStream::DecryptBuffer(
buffer->data(), buffer->data_size());
decrypted->set_timestamp(buffer->timestamp());
decrypted->set_duration(buffer->duration());
+ if (buffer->is_key_frame())
+ decrypted->set_is_key_frame(true);
+
state_ = kIdle;
base::ResetAndReturn(&read_cb_).Run(kOk, decrypted);
return;
@@ -303,10 +310,17 @@ void DecryptingDemuxerStream::DeliverBuffer(
}
state_ = kWaitingForKey;
+ waiting_for_decryption_key_cb_.Run();
return;
}
DCHECK_EQ(status, Decryptor::kSuccess);
+
+ // Copy the key frame flag from the encrypted to decrypted buffer, assuming
+ // that the decryptor initialized the flag to false.
+ if (pending_buffer_to_decrypt_->is_key_frame())
+ decrypted_buffer->set_is_key_frame(true);
+
pending_buffer_to_decrypt_ = NULL;
state_ = kIdle;
base::ResetAndReturn(&read_cb_).Run(kOk, decrypted_buffer);
diff --git a/chromium/media/filters/decrypting_demuxer_stream.h b/chromium/media/filters/decrypting_demuxer_stream.h
index 09bf97aaf36..e8d82224a10 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.h
+++ b/chromium/media/filters/decrypting_demuxer_stream.h
@@ -30,7 +30,8 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
public:
DecryptingDemuxerStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb);
// Cancels all pending operations immediately and fires all pending callbacks.
~DecryptingDemuxerStream() override;
@@ -48,7 +49,8 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
void Read(const ReadCB& read_cb) override;
AudioDecoderConfig audio_decoder_config() override;
VideoDecoderConfig video_decoder_config() override;
- Type type() override;
+ Type type() const override;
+ Liveness liveness() const override;
void EnableBitstreamConverter() override;
bool SupportsConfigChanges() override;
VideoRotation video_rotation() override;
@@ -103,6 +105,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
PipelineStatusCB init_cb_;
ReadCB read_cb_;
base::Closure reset_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
// Pointer to the input demuxer stream that will feed us encrypted buffers.
DemuxerStream* demuxer_stream_;
@@ -124,9 +127,8 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
// decrypting again in case the newly added key is the correct decryption key.
bool key_added_while_decrypt_pending_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<DecryptingDemuxerStream> weak_factory_;
base::WeakPtr<DecryptingDemuxerStream> weak_this_;
+ base::WeakPtrFactory<DecryptingDemuxerStream> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(DecryptingDemuxerStream);
};
diff --git a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
index fa2a36cf947..e5d2567fab7 100644
--- a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -29,7 +29,7 @@ static const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
static const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
// Create a fake non-empty buffer in an encrypted stream. When |is_clear| is
-// ture, the buffer is not encrypted (signaled by an empty IV).
+// true, the buffer is not encrypted (signaled by an empty IV).
static scoped_refptr<DecoderBuffer> CreateFakeEncryptedStreamBuffer(
bool is_clear) {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(kFakeBufferSize));
@@ -77,7 +77,9 @@ class DecryptingDemuxerStreamTest : public testing::Test {
message_loop_.message_loop_proxy(),
base::Bind(
&DecryptingDemuxerStreamTest::RequestDecryptorNotification,
- base::Unretained(this)))),
+ base::Unretained(this)),
+ base::Bind(&DecryptingDemuxerStreamTest::OnWaitingForDecryptionKey,
+ base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
is_decryptor_set_(false),
input_audio_stream_(
@@ -225,6 +227,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
EXPECT_CALL(*decryptor_, Decrypt(_, encrypted_buffer_, _))
.WillRepeatedly(RunCallback<2>(Decryptor::kNoKey,
scoped_refptr<DecoderBuffer>()));
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey());
demuxer_stream_->Read(base::Bind(&DecryptingDemuxerStreamTest::BufferReady,
base::Unretained(this)));
message_loop_.RunUntilIdle();
@@ -260,6 +263,8 @@ class DecryptingDemuxerStreamTest : public testing::Test {
MOCK_METHOD1(DecryptorSet, void(bool));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+
base::MessageLoop message_loop_;
scoped_ptr<DecryptingDemuxerStream> demuxer_stream_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
diff --git a/chromium/media/filters/decrypting_video_decoder.cc b/chromium/media/filters/decrypting_video_decoder.cc
index 4a14bad5dcd..8f30331a38b 100644
--- a/chromium/media/filters/decrypting_video_decoder.cc
+++ b/chromium/media/filters/decrypting_video_decoder.cc
@@ -6,32 +6,35 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/decryptor.h"
#include "media/base/pipeline.h"
-#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
namespace media {
+const char DecryptingVideoDecoder::kDecoderName[] = "DecryptingVideoDecoder";
+
DecryptingVideoDecoder::DecryptingVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb)
: task_runner_(task_runner),
state_(kUninitialized),
+ waiting_for_decryption_key_cb_(waiting_for_decryption_key_cb),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decode_pending_(false),
trace_id_(0),
- weak_factory_(this) {}
+ weak_factory_(this) {
+}
std::string DecryptingVideoDecoder::GetDisplayName() const {
- return "DecryptingVideoDecoder";
+ return kDecoderName;
}
void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
@@ -268,6 +271,7 @@ void DecryptingVideoDecoder::DeliverFrame(
}
state_ = kWaitingForKey;
+ waiting_for_decryption_key_cb_.Run();
return;
}
diff --git a/chromium/media/filters/decrypting_video_decoder.h b/chromium/media/filters/decrypting_video_decoder.h
index 1911c79ab0b..a2a952844d9 100644
--- a/chromium/media/filters/decrypting_video_decoder.h
+++ b/chromium/media/filters/decrypting_video_decoder.h
@@ -28,7 +28,8 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
public:
DecryptingVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const base::Closure& waiting_for_decryption_key_cb);
~DecryptingVideoDecoder() override;
// VideoDecoder implementation.
@@ -41,6 +42,8 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
+ static const char kDecoderName[];
+
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
// TODO(xhwang): Add a ASCII state diagram in this file after this class
@@ -86,6 +89,7 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
VideoDecoderConfig config_;
@@ -108,9 +112,8 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// matching DecryptCB call (in DoDeliverFrame()).
uint32 trace_id_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<DecryptingVideoDecoder> weak_factory_;
base::WeakPtr<DecryptingVideoDecoder> weak_this_;
+ base::WeakPtrFactory<DecryptingVideoDecoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(DecryptingVideoDecoder);
};
diff --git a/chromium/media/filters/decrypting_video_decoder_unittest.cc b/chromium/media/filters/decrypting_video_decoder_unittest.cc
index 11dc52ae7a0..285bab67aa1 100644
--- a/chromium/media/filters/decrypting_video_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_video_decoder_unittest.cc
@@ -57,7 +57,9 @@ class DecryptingVideoDecoderTest : public testing::Test {
message_loop_.message_loop_proxy(),
base::Bind(
&DecryptingVideoDecoderTest::RequestDecryptorNotification,
- base::Unretained(this)))),
+ base::Unretained(this)),
+ base::Bind(&DecryptingVideoDecoderTest::OnWaitingForDecryptionKey,
+ base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
num_decrypt_and_decode_calls_(0),
num_frames_in_decryptor_(0),
@@ -179,6 +181,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
void EnterWaitingForKeyState() {
EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kNoKey, null_video_frame_));
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey());
decoder_->Decode(encrypted_buffer_,
base::Bind(&DecryptingVideoDecoderTest::DecodeDone,
base::Unretained(this)));
@@ -227,6 +230,8 @@ class DecryptingVideoDecoderTest : public testing::Test {
MOCK_METHOD1(DecryptorSet, void(bool));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+
base::MessageLoop message_loop_;
scoped_ptr<DecryptingVideoDecoder> decoder_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
@@ -263,6 +268,7 @@ TEST_F(DecryptingVideoDecoderTest, Initialize_Failure) {
.WillRepeatedly(RunCallback<1>(false));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kVideo, _))
.WillRepeatedly(SaveArg<1>(&key_added_cb_));
+ EXPECT_CALL(*this, RequestDecryptorNotification(_)).Times(2);
InitializeAndExpectStatus(TestVideoConfig::NormalEncrypted(),
DECODER_ERROR_NOT_SUPPORTED);
@@ -333,7 +339,7 @@ TEST_F(DecryptingVideoDecoderTest, KeyAdded_DuringWaitingForKey) {
// Test the case where the a key is added when the decryptor is in
// kPendingDecode state.
-TEST_F(DecryptingVideoDecoderTest, KeyAdded_DruingPendingDecode) {
+TEST_F(DecryptingVideoDecoderTest, KeyAdded_DuringPendingDecode) {
Initialize();
EnterPendingDecodeState();
diff --git a/chromium/media/filters/default_media_permission.cc b/chromium/media/filters/default_media_permission.cc
new file mode 100644
index 00000000000..60623b9a781
--- /dev/null
+++ b/chromium/media/filters/default_media_permission.cc
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/default_media_permission.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+
+namespace media {
+
+DefaultMediaPermission::DefaultMediaPermission(bool allow) : allow_(allow) {
+}
+
+DefaultMediaPermission::~DefaultMediaPermission() {
+}
+
+static void FirePermissionStatusCallback(
+ const MediaPermission::PermissionStatusCB& permission_status_cb,
+ bool allow) {
+ LOG(WARNING) << (allow ? "Allowing" : "Denying")
+ << "media permission request with a default value instead of "
+ "real user's consent. This should NOT be used for in a real "
+ "user-facing product.";
+ // Return the callback asynchronously.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(permission_status_cb, allow));
+}
+
+void DefaultMediaPermission::HasPermission(
+ Type type,
+ const GURL& /* security_origin */,
+ const PermissionStatusCB& permission_status_cb) {
+ CHECK_EQ(PROTECTED_MEDIA_IDENTIFIER, type);
+ FirePermissionStatusCallback(permission_status_cb, allow_);
+}
+
+void DefaultMediaPermission::RequestPermission(
+ Type type,
+ const GURL& /* security_origin */,
+ const PermissionStatusCB& permission_status_cb) {
+ CHECK_EQ(PROTECTED_MEDIA_IDENTIFIER, type);
+ FirePermissionStatusCallback(permission_status_cb, allow_);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/default_media_permission.h b/chromium/media/filters/default_media_permission.h
new file mode 100644
index 00000000000..9ad295a9c05
--- /dev/null
+++ b/chromium/media/filters/default_media_permission.h
@@ -0,0 +1,39 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_DEFAULT_MEDIA_PERMISSION_H_
+#define MEDIA_FILTERS_DEFAULT_MEDIA_PERMISSION_H_
+
+#include "media/base/media_export.h"
+#include "media/base/media_permission.h"
+
+namespace media {
+
+// Default MediaPermission implementation that will always allow or deny the
+// permission request/check based on |allow|.
+// WARNING: This class allows or denies permission request/check without real
+// user's consent. It should NOT be used in a real user facing product.
+class MEDIA_EXPORT DefaultMediaPermission : public MediaPermission {
+ public:
+ explicit DefaultMediaPermission(bool allow);
+ ~DefaultMediaPermission() override;
+
+ // media::MediaPermission implementation.
+ void HasPermission(Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) override;
+ void RequestPermission(
+ Type type,
+ const GURL& security_origin,
+ const PermissionStatusCB& permission_status_cb) override;
+
+ private:
+ const bool allow_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultMediaPermission);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_DEFAULT_MEDIA_PERMISSION_H_
diff --git a/chromium/media/filters/fake_video_decoder.cc b/chromium/media/filters/fake_video_decoder.cc
index f7f1abd8540..4c6fa9a7712 100644
--- a/chromium/media/filters/fake_video_decoder.cc
+++ b/chromium/media/filters/fake_video_decoder.cc
@@ -4,8 +4,6 @@
#include "media/filters/fake_video_decoder.h"
-#include "base/bind.h"
-#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "media/base/bind_to_current_loop.h"
@@ -14,12 +12,15 @@
namespace media {
FakeVideoDecoder::FakeVideoDecoder(int decoding_delay,
- int max_parallel_decoding_requests)
+ int max_parallel_decoding_requests,
+ const BytesDecodedCB& bytes_decoded_cb)
: decoding_delay_(decoding_delay),
max_parallel_decoding_requests_(max_parallel_decoding_requests),
+ bytes_decoded_cb_(bytes_decoded_cb),
state_(STATE_UNINITIALIZED),
hold_decode_(false),
total_bytes_decoded_(0),
+ fail_to_initialize_(false),
weak_factory_(this) {
DCHECK_GE(decoding_delay, 0);
}
@@ -66,8 +67,13 @@ void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
decoded_frames_.clear();
}
- state_ = STATE_NORMAL;
- init_cb_.RunOrHold(PIPELINE_OK);
+ if (fail_to_initialize_) {
+ state_ = STATE_ERROR;
+ init_cb_.RunOrHold(DECODER_ERROR_NOT_SUPPORTED);
+ } else {
+ state_ = STATE_NORMAL;
+ init_cb_.RunOrHold(PIPELINE_OK);
+ }
}
void FakeVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
@@ -180,6 +186,10 @@ void FakeVideoDecoder::SimulateError() {
decoded_frames_.clear();
}
+void FakeVideoDecoder::SimulateFailureToInit() {
+ fail_to_initialize_ = true;
+}
+
int FakeVideoDecoder::GetMaxDecodeRequests() const {
return max_parallel_decoding_requests_;
}
@@ -189,8 +199,10 @@ void FakeVideoDecoder::OnFrameDecoded(int buffer_size,
Status status) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (status == kOk)
+ if (status == kOk) {
total_bytes_decoded_ += buffer_size;
+ bytes_decoded_cb_.Run(buffer_size);
+ }
decode_cb.Run(status);
}
diff --git a/chromium/media/filters/fake_video_decoder.h b/chromium/media/filters/fake_video_decoder.h
index d7150b69530..bf407b995d0 100644
--- a/chromium/media/filters/fake_video_decoder.h
+++ b/chromium/media/filters/fake_video_decoder.h
@@ -18,7 +18,7 @@
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
using base::ResetAndReturn;
@@ -28,11 +28,16 @@ class SingleThreadTaskRunner;
namespace media {
+typedef base::Callback<void(int)> BytesDecodedCB;
+
class FakeVideoDecoder : public VideoDecoder {
public:
// Constructs an object with a decoding delay of |decoding_delay| frames.
+ // |bytes_decoded_cb| is called after each decode. The sum of the byte
+ // count over all calls will be equal to total_bytes_decoded().
FakeVideoDecoder(int decoding_delay,
- int max_parallel_decoding_requests);
+ int max_parallel_decoding_requests,
+ const BytesDecodedCB& bytes_decoded_cb);
~FakeVideoDecoder() override;
// VideoDecoder implementation.
@@ -61,6 +66,8 @@ class FakeVideoDecoder : public VideoDecoder {
void SatisfySingleDecode();
void SimulateError();
+ // Fail with status DECODER_ERROR_NOT_SUPPORTED when Initialize() is called.
+ void SimulateFailureToInit();
int total_bytes_decoded() const { return total_bytes_decoded_; }
@@ -90,6 +97,7 @@ class FakeVideoDecoder : public VideoDecoder {
const size_t decoding_delay_;
const int max_parallel_decoding_requests_;
+ BytesDecodedCB bytes_decoded_cb_;
State state_;
@@ -107,6 +115,8 @@ class FakeVideoDecoder : public VideoDecoder {
int total_bytes_decoded_;
+ bool fail_to_initialize_;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<FakeVideoDecoder> weak_factory_;
diff --git a/chromium/media/filters/fake_video_decoder_unittest.cc b/chromium/media/filters/fake_video_decoder_unittest.cc
index ec0d303150e..448dcdd5197 100644
--- a/chromium/media/filters/fake_video_decoder_unittest.cc
+++ b/chromium/media/filters/fake_video_decoder_unittest.cc
@@ -30,10 +30,15 @@ class FakeVideoDecoderTest
public testing::WithParamInterface<FakeVideoDecoderTestParams> {
public:
FakeVideoDecoderTest()
- : decoder_(new FakeVideoDecoder(GetParam().decoding_delay,
- GetParam().max_decode_requests)),
+ : decoder_(new FakeVideoDecoder(
+ GetParam().decoding_delay,
+ GetParam().max_decode_requests,
+ base::Bind(&FakeVideoDecoderTest::OnBytesDecoded,
+ base::Unretained(this)))),
num_input_buffers_(0),
num_decoded_frames_(0),
+ num_bytes_decoded_(0),
+ total_bytes_in_buffers_(0),
last_decode_status_(VideoDecoder::kOk),
pending_decode_requests_(0),
is_reset_pending_(false) {}
@@ -42,16 +47,17 @@ class FakeVideoDecoderTest
Destroy();
}
- void InitializeWithConfig(const VideoDecoderConfig& config) {
+ void InitializeWithConfigAndExpectStatus(const VideoDecoderConfig& config,
+ PipelineStatus status) {
decoder_->Initialize(
- config, false, NewExpectedStatusCB(PIPELINE_OK),
+ config, false, NewExpectedStatusCB(status),
base::Bind(&FakeVideoDecoderTest::FrameReady, base::Unretained(this)));
message_loop_.RunUntilIdle();
current_config_ = config;
}
void Initialize() {
- InitializeWithConfig(TestVideoConfig::Normal());
+ InitializeWithConfigAndExpectStatus(TestVideoConfig::Normal(), PIPELINE_OK);
}
void EnterPendingInitState() {
@@ -77,6 +83,10 @@ class FakeVideoDecoderTest
num_decoded_frames_++;
}
+ void OnBytesDecoded(int count) {
+ num_bytes_decoded_ += count;
+ }
+
enum CallbackResult {
PENDING,
OK,
@@ -115,6 +125,7 @@ class FakeVideoDecoderTest
current_config_,
base::TimeDelta::FromMilliseconds(kDurationMs * num_input_buffers_),
base::TimeDelta::FromMilliseconds(kDurationMs));
+ total_bytes_in_buffers_ += buffer->data_size();
} else {
buffer = DecoderBuffer::CreateEOSBuffer();
}
@@ -213,6 +224,8 @@ class FakeVideoDecoderTest
int num_input_buffers_;
int num_decoded_frames_;
+ int num_bytes_decoded_;
+ int total_bytes_in_buffers_;
// Callback result/status.
VideoDecoder::Status last_decode_status_;
@@ -237,10 +250,19 @@ TEST_P(FakeVideoDecoderTest, Initialize) {
Initialize();
}
+TEST_P(FakeVideoDecoderTest, SimulateFailureToInitialize) {
+ decoder_->SimulateFailureToInit();
+ InitializeWithConfigAndExpectStatus(TestVideoConfig::Normal(),
+ DECODER_ERROR_NOT_SUPPORTED);
+ Decode();
+ EXPECT_EQ(last_decode_status_, VideoDecoder::kDecodeError);
+}
+
TEST_P(FakeVideoDecoderTest, Read_AllFrames) {
Initialize();
ReadAllFrames();
EXPECT_EQ(kTotalBuffers, num_decoded_frames_);
+ EXPECT_EQ(total_bytes_in_buffers_, num_bytes_decoded_);
}
TEST_P(FakeVideoDecoderTest, Read_DecodingDelay) {
@@ -254,7 +276,9 @@ TEST_P(FakeVideoDecoderTest, Read_DecodingDelay) {
}
TEST_P(FakeVideoDecoderTest, Read_ZeroDelay) {
- decoder_.reset(new FakeVideoDecoder(0, 1));
+ decoder_.reset(new FakeVideoDecoder(
+ 0, 1, base::Bind(&FakeVideoDecoderTest::OnBytesDecoded,
+ base::Unretained(this))));
Initialize();
while (num_input_buffers_ < kTotalBuffers) {
@@ -321,8 +345,18 @@ TEST_P(FakeVideoDecoderTest, ReadWithHold_DecodingDelay) {
TEST_P(FakeVideoDecoderTest, Reinitialize) {
Initialize();
ReadOneFrame();
- InitializeWithConfig(TestVideoConfig::Large());
+ InitializeWithConfigAndExpectStatus(TestVideoConfig::Large(), PIPELINE_OK);
+ ReadOneFrame();
+}
+
+TEST_P(FakeVideoDecoderTest, SimulateFailureToReinitialize) {
+ Initialize();
ReadOneFrame();
+ decoder_->SimulateFailureToInit();
+ InitializeWithConfigAndExpectStatus(TestVideoConfig::Normal(),
+ DECODER_ERROR_NOT_SUPPORTED);
+ Decode();
+ EXPECT_EQ(last_decode_status_, VideoDecoder::kDecodeError);
}
// Reinitializing the decoder during the middle of the decoding process can
diff --git a/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc b/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
index 46aeb786744..ae17302ddbe 100644
--- a/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
+++ b/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
@@ -25,8 +25,6 @@ class FFmpegAACBitstreamConverterTest : public testing::Test {
test_context_.extradata_size = sizeof(context_header_);
}
- virtual ~FFmpegAACBitstreamConverterTest() {}
-
void CreatePacket(AVPacket* packet, const uint8* data, uint32 data_size) {
// Create new packet sized of |data_size| from |data|.
EXPECT_EQ(av_new_packet(packet, data_size), 0);
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index ae4f3fb52e1..2f8bf1b87f7 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -13,7 +13,6 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
-#include "media/base/sample_format.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
@@ -263,11 +262,11 @@ bool FFmpegAudioDecoder::FFmpegDecode(
<< "This is quite possibly a bug in the audio decoder not handling "
<< "end of stream AVPackets correctly.";
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(DEBUG, log_cb_)
<< "Dropping audio frame which failed decode with timestamp: "
- << buffer->timestamp().InMicroseconds() << " us, duration: "
- << buffer->duration().InMicroseconds() << " us, packet size: "
- << buffer->data_size() << " bytes";
+ << buffer->timestamp().InMicroseconds()
+ << " us, duration: " << buffer->duration().InMicroseconds()
+ << " us, packet size: " << buffer->data_size() << " bytes";
break;
}
@@ -293,9 +292,9 @@ bool FFmpegAudioDecoder::FFmpegDecode(
if (config_.codec() == kCodecAAC &&
av_frame_->sample_rate == 2 * config_.samples_per_second()) {
- MEDIA_LOG(log_cb_) << "Implicit HE-AAC signalling is being used."
- << " Please use mp4a.40.5 instead of mp4a.40.2 in"
- << " the mimetype.";
+ MEDIA_LOG(DEBUG, log_cb_) << "Implicit HE-AAC signalling is being"
+ << " used. Please use mp4a.40.5 instead of"
+ << " mp4a.40.2 in the mimetype.";
}
// This is an unrecoverable error, so bail out.
av_frame_unref(av_frame_.get());
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index b8665efeb3b..48eaf447ea3 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -5,28 +5,24 @@
#include "media/filters/ffmpeg_demuxer.h"
#include <algorithm>
-#include <string>
#include "base/base64.h"
#include "base/bind.h"
-#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
#include "base/task_runner_util.h"
+#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
-#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_aac_bitstream_converter.h"
#include "media/filters/ffmpeg_bitstream_converter.h"
@@ -90,9 +86,10 @@ static base::TimeDelta ExtractStartTime(AVStream* stream,
FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
AVStream* stream)
: demuxer_(demuxer),
- task_runner_(base::MessageLoopProxy::current()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
stream_(stream),
type_(UNKNOWN),
+ liveness_(LIVENESS_UNKNOWN),
end_of_stream_(false),
last_packet_timestamp_(kNoTimestamp()),
last_packet_duration_(kNoTimestamp()),
@@ -164,10 +161,16 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
return;
encryption_key_id_.assign(enc_key_id);
- demuxer_->FireNeedKey(kWebMInitDataType, enc_key_id);
+ demuxer_->OnEncryptedMediaInitData(EmeInitDataType::WEBM, enc_key_id);
}
}
+FFmpegDemuxerStream::~FFmpegDemuxerStream() {
+ DCHECK(!demuxer_);
+ DCHECK(read_cb_.is_null());
+ DCHECK(buffer_queue_.IsEmpty());
+}
+
void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -255,7 +258,7 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
// allow front discard padding on the first buffer. Otherwise the discard
// helper can't figure out which data to discard. See AudioDiscardHelper.
int discard_front_samples = base::ByteSwapToLE32(*skip_samples_ptr);
- if (last_packet_timestamp_ != kNoTimestamp()) {
+ if (last_packet_timestamp_ != kNoTimestamp() && discard_front_samples) {
DLOG(ERROR) << "Skip samples are only allowed for the first packet.";
discard_front_samples = 0;
}
@@ -360,6 +363,9 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
}
}
+ if (packet.get()->flags & AV_PKT_FLAG_KEY)
+ buffer->set_is_key_frame(true);
+
last_packet_timestamp_ = buffer->timestamp();
last_packet_duration_ = buffer->duration();
@@ -400,11 +406,16 @@ void FFmpegDemuxerStream::Stop() {
end_of_stream_ = true;
}
-DemuxerStream::Type FFmpegDemuxerStream::type() {
+DemuxerStream::Type FFmpegDemuxerStream::type() const {
DCHECK(task_runner_->BelongsToCurrentThread());
return type_;
}
+DemuxerStream::Liveness FFmpegDemuxerStream::liveness() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return liveness_;
+}
+
void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
@@ -470,10 +481,10 @@ VideoRotation FFmpegDemuxerStream::video_rotation() {
return video_rotation_;
}
-FFmpegDemuxerStream::~FFmpegDemuxerStream() {
- DCHECK(!demuxer_);
- DCHECK(read_cb_.is_null());
- DCHECK(buffer_queue_.IsEmpty());
+void FFmpegDemuxerStream::SetLiveness(Liveness liveness) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(liveness_, LIVENESS_UNKNOWN);
+ liveness_ = liveness;
}
base::TimeDelta FFmpegDemuxerStream::GetElapsedTime() const {
@@ -555,7 +566,7 @@ base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
FFmpegDemuxer::FFmpegDemuxer(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
DataSource* data_source,
- const NeedKeyCB& need_key_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const scoped_refptr<MediaLog>& media_log)
: host_(NULL),
task_runner_(task_runner),
@@ -568,10 +579,9 @@ FFmpegDemuxer::FFmpegDemuxer(
start_time_(kNoTimestamp()),
preferred_stream_for_seeking_(-1, kNoTimestamp()),
fallback_stream_for_seeking_(-1, kNoTimestamp()),
- liveness_(LIVENESS_UNKNOWN),
text_enabled_(false),
duration_known_(false),
- need_key_cb_(need_key_cb),
+ encrypted_media_init_data_cb_(encrypted_media_init_data_cb),
weak_factory_(this) {
DCHECK(task_runner_.get());
DCHECK(data_source_);
@@ -673,6 +683,12 @@ void FFmpegDemuxer::Initialize(DemuxerHost* host,
// available, so add a metadata entry to ensure some is always present.
av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);
+ // Ensure ffmpeg doesn't give up too early while looking for stream params;
+ // this does not increase the amount of data downloaded. The default value
+ // is 5 AV_TIME_BASE units (1 second each), which prevents some oddly muxed
+ // streams from being detected properly; this value was chosen arbitrarily.
+ format_context->max_analyze_duration2 = 60 * AV_TIME_BASE;
+
// Open the AVFormatContext using our glue layer.
CHECK(blocking_thread_.Start());
base::PostTaskAndReplyWithResult(
@@ -708,11 +724,6 @@ base::TimeDelta FFmpegDemuxer::GetStartTime() const {
return std::max(start_time_, base::TimeDelta());
}
-Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return liveness_;
-}
-
void FFmpegDemuxer::AddTextStreams() {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -823,10 +834,11 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
// If no estimate is found, the stream entry will be kInfiniteDuration().
std::vector<base::TimeDelta> start_time_estimates(format_context->nb_streams,
kInfiniteDuration());
- if (format_context->packet_buffer &&
+ const AVFormatInternal* internal = format_context->internal;
+ if (internal && internal->packet_buffer &&
format_context->start_time != static_cast<int64>(AV_NOPTS_VALUE)) {
- struct AVPacketList* packet_buffer = format_context->packet_buffer;
- while (packet_buffer != format_context->packet_buffer_end) {
+ struct AVPacketList* packet_buffer = internal->packet_buffer;
+ while (packet_buffer != internal->packet_buffer_end) {
DCHECK_LT(static_cast<size_t>(packet_buffer->pkt.stream_index),
start_time_estimates.size());
const AVStream* stream =
@@ -985,11 +997,11 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
timeline_offset_ += start_time_;
if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
- liveness_ = LIVENESS_LIVE;
+ SetLiveness(DemuxerStream::LIVENESS_LIVE);
} else if (max_duration != kInfiniteDuration()) {
- liveness_ = LIVENESS_RECORDED;
+ SetLiveness(DemuxerStream::LIVENESS_RECORDED);
} else {
- liveness_ = LIVENESS_UNKNOWN;
+ SetLiveness(DemuxerStream::LIVENESS_UNKNOWN);
}
// Good to go: set the duration and bitrate and notify we're done
@@ -1034,6 +1046,9 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
AVCodec* codec = avcodec_find_decoder(video_codec->codec_id);
if (codec) {
media_log_->SetStringProperty("video_codec_name", codec->name);
+ } else if (video_codec->codec_id == AV_CODEC_ID_VP9) {
+ // ffmpeg doesn't know about VP9 decoder. So we need to log it explicitly.
+ media_log_->SetStringProperty("video_codec_name", "vp9");
}
media_log_->SetIntegerProperty("width", video_codec->width);
@@ -1239,11 +1254,12 @@ void FFmpegDemuxer::StreamHasEnded() {
}
}
-void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
- const std::string& encryption_key_id) {
+void FFmpegDemuxer::OnEncryptedMediaInitData(
+ EmeInitDataType init_data_type,
+ const std::string& encryption_key_id) {
std::vector<uint8> key_id_local(encryption_key_id.begin(),
encryption_key_id.end());
- need_key_cb_.Run(init_data_type, key_id_local);
+ encrypted_media_init_data_cb_.Run(init_data_type, key_id_local);
}
void FFmpegDemuxer::NotifyCapacityAvailable() {
@@ -1272,4 +1288,12 @@ void FFmpegDemuxer::OnDataSourceError() {
host_->OnDemuxerError(PIPELINE_ERROR_READ);
}
+void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ for (const auto& stream : streams_) { // |stream| is a ref to a pointer.
+ if (stream)
+ stream->SetLiveness(liveness);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index 82ac5dea198..329364ff123 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -86,7 +86,8 @@ class FFmpegDemuxerStream : public DemuxerStream {
}
// DemuxerStream implementation.
- Type type() override;
+ Type type() const override;
+ Liveness liveness() const override;
void Read(const ReadCB& read_cb) override;
void EnableBitstreamConverter() override;
bool SupportsConfigChanges() override;
@@ -94,6 +95,8 @@ class FFmpegDemuxerStream : public DemuxerStream {
VideoDecoderConfig video_decoder_config() override;
VideoRotation video_rotation() override;
+ void SetLiveness(Liveness liveness);
+
// Returns the range of buffered data in this stream.
Ranges<base::TimeDelta> GetBufferedRanges() const;
@@ -136,6 +139,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
AudioDecoderConfig audio_config_;
VideoDecoderConfig video_config_;
Type type_;
+ Liveness liveness_;
base::TimeDelta duration_;
bool end_of_stream_;
base::TimeDelta last_packet_timestamp_;
@@ -160,7 +164,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
public:
FFmpegDemuxer(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
DataSource* data_source,
- const NeedKeyCB& need_key_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const scoped_refptr<MediaLog>& media_log);
~FFmpegDemuxer() override;
@@ -173,11 +177,11 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
base::Time GetTimelineOffset() const override;
DemuxerStream* GetStream(DemuxerStream::Type type) override;
base::TimeDelta GetStartTime() const override;
- Liveness GetLiveness() const override;
- // Calls |need_key_cb_| with the initialization data encountered in the file.
- void FireNeedKey(const std::string& init_data_type,
- const std::string& encryption_key_id);
+ // Calls |encrypted_media_init_data_cb_| with the initialization data
+ // encountered in the file.
+ void OnEncryptedMediaInitData(EmeInitDataType init_data_type,
+ const std::string& encryption_key_id);
// Allow FFmpegDemuxerStream to notify us when there is updated information
// about capacity and what buffered data is available.
@@ -225,6 +229,8 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// the text renderer to bind each text stream to the cue rendering engine.
void AddTextStreams();
+ void SetLiveness(DemuxerStream::Liveness liveness);
+
DemuxerHost* host_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
@@ -283,9 +289,6 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// time if the file doesn't have an association to Time.
base::Time timeline_offset_;
- // Liveness of the stream.
- Liveness liveness_;
-
// Whether text streams have been enabled for this demuxer.
bool text_enabled_;
@@ -297,7 +300,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
scoped_ptr<BlockingUrlProtocol> url_protocol_;
scoped_ptr<FFmpegGlue> glue_;
- const NeedKeyCB need_key_cb_;
+ const EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<FFmpegDemuxer> weak_factory_;
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index da35121a9f7..15f53fbfac7 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -19,7 +19,6 @@
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
#include "media/formats/mp4/avc.h"
-#include "media/formats/webm/webm_crypto_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
@@ -42,6 +41,11 @@ MATCHER(IsEndOfStreamBuffer,
return arg->end_of_stream();
}
+const uint8 kEncryptedMediaInitData[] = {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+};
+
static void EosOnReadDone(bool* got_eos_buffer,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
@@ -78,13 +82,12 @@ class FFmpegDemuxerTest : public testing::Test {
CreateDataSource(name);
- Demuxer::NeedKeyCB need_key_cb =
- base::Bind(&FFmpegDemuxerTest::NeedKeyCB, base::Unretained(this));
+ Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
+ &FFmpegDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
- demuxer_.reset(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
- data_source_.get(),
- need_key_cb,
- new MediaLog()));
+ demuxer_.reset(new FFmpegDemuxer(
+ message_loop_.message_loop_proxy(), data_source_.get(),
+ encrypted_media_init_data_cb, new MediaLog()));
}
MOCK_METHOD1(CheckPoint, void(int v));
@@ -108,13 +111,28 @@ class FFmpegDemuxerTest : public testing::Test {
MOCK_METHOD2(OnReadDoneCalled, void(int, int64));
+ struct ReadExpectation {
+ ReadExpectation(int size,
+ int64 timestamp_us,
+ const base::TimeDelta& discard_front_padding,
+ bool is_key_frame)
+ : size(size),
+ timestamp_us(timestamp_us),
+ discard_front_padding(discard_front_padding),
+ is_key_frame(is_key_frame) {
+ }
+
+ int size;
+ int64 timestamp_us;
+ base::TimeDelta discard_front_padding;
+ bool is_key_frame;
+ };
+
// Verifies that |buffer| has a specific |size| and |timestamp|.
// |location| simply indicates where the call to this function was made.
// This makes it easier to track down where test failures occur.
void OnReadDone(const tracked_objects::Location& location,
- int size,
- int64 timestamp_us,
- base::TimeDelta discard_front_padding,
+ const ReadExpectation& read_expectation,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
std::string location_str;
@@ -122,52 +140,51 @@ class FFmpegDemuxerTest : public testing::Test {
location_str += "\n";
SCOPED_TRACE(location_str);
EXPECT_EQ(status, DemuxerStream::kOk);
- OnReadDoneCalled(size, timestamp_us);
EXPECT_TRUE(buffer.get() != NULL);
- EXPECT_EQ(size, buffer->data_size());
- EXPECT_EQ(timestamp_us, buffer->timestamp().InMicroseconds());
- EXPECT_EQ(discard_front_padding, buffer->discard_padding().first);
+ EXPECT_EQ(read_expectation.size, buffer->data_size());
+ EXPECT_EQ(read_expectation.timestamp_us,
+ buffer->timestamp().InMicroseconds());
+ EXPECT_EQ(read_expectation.discard_front_padding,
+ buffer->discard_padding().first);
+ EXPECT_EQ(read_expectation.is_key_frame, buffer->is_key_frame());
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
+ OnReadDoneCalled(read_expectation.size, read_expectation.timestamp_us);
message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}
DemuxerStream::ReadCB NewReadCB(const tracked_objects::Location& location,
int size,
- int64 timestamp_us) {
- EXPECT_CALL(*this, OnReadDoneCalled(size, timestamp_us));
- return base::Bind(&FFmpegDemuxerTest::OnReadDone,
- base::Unretained(this),
- location,
- size,
- timestamp_us,
- base::TimeDelta());
+ int64 timestamp_us,
+ bool is_key_frame) {
+ return NewReadCBWithCheckedDiscard(location,
+ size,
+ timestamp_us,
+ base::TimeDelta(),
+ is_key_frame);
}
DemuxerStream::ReadCB NewReadCBWithCheckedDiscard(
const tracked_objects::Location& location,
int size,
int64 timestamp_us,
- base::TimeDelta discard_front_padding) {
+ base::TimeDelta discard_front_padding,
+ bool is_key_frame) {
EXPECT_CALL(*this, OnReadDoneCalled(size, timestamp_us));
+
+ struct ReadExpectation read_expectation(size,
+ timestamp_us,
+ discard_front_padding,
+ is_key_frame);
+
return base::Bind(&FFmpegDemuxerTest::OnReadDone,
base::Unretained(this),
location,
- size,
- timestamp_us,
- discard_front_padding);
+ read_expectation);
}
- // TODO(xhwang): This is a workaround of the issue that move-only parameters
- // are not supported in mocked methods. Remove this when the issue is fixed
- // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
- // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
- MOCK_METHOD3(NeedKeyCBMock, void(const std::string& type,
- const uint8* init_data, int init_data_size));
- void NeedKeyCB(const std::string& type,
- const std::vector<uint8>& init_data) {
- const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
- NeedKeyCBMock(type, init_data_ptr, init_data.size());
- }
+ MOCK_METHOD2(OnEncryptedMediaInitData,
+ void(EmeInitDataType init_data_type,
+ const std::vector<uint8>& init_data));
// Accessor to demuxer internals.
void set_duration_known(bool duration_known) {
@@ -358,8 +375,12 @@ TEST_F(FFmpegDemuxerTest, Initialize_MultitrackText) {
}
TEST_F(FFmpegDemuxerTest, Initialize_Encrypted) {
- EXPECT_CALL(*this, NeedKeyCBMock(kWebMInitDataType, NotNull(),
- DecryptConfig::kDecryptionKeySize))
+ EXPECT_CALL(*this,
+ OnEncryptedMediaInitData(
+ EmeInitDataType::WEBM,
+ std::vector<uint8>(kEncryptedMediaInitData,
+ kEncryptedMediaInitData +
+ arraysize(kEncryptedMediaInitData))))
.Times(Exactly(2));
CreateDemuxer("bear-320x240-av_enc-av.webm");
@@ -374,10 +395,10 @@ TEST_F(FFmpegDemuxerTest, Read_Audio) {
// Attempt a read from the audio stream and run the message loop until done.
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
- audio->Read(NewReadCB(FROM_HERE, 29, 0));
+ audio->Read(NewReadCB(FROM_HERE, 29, 0, true));
message_loop_.Run();
- audio->Read(NewReadCB(FROM_HERE, 27, 3000));
+ audio->Read(NewReadCB(FROM_HERE, 27, 3000, true));
message_loop_.Run();
}
@@ -389,10 +410,10 @@ TEST_F(FFmpegDemuxerTest, Read_Video) {
// Attempt a read from the video stream and run the message loop until done.
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
- video->Read(NewReadCB(FROM_HERE, 22084, 0));
+ video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
- video->Read(NewReadCB(FROM_HERE, 1057, 33000));
+ video->Read(NewReadCB(FROM_HERE, 1057, 33000, false));
message_loop_.Run();
}
@@ -406,10 +427,10 @@ TEST_F(FFmpegDemuxerTest, Read_Text) {
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
- text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
+ text_stream->Read(NewReadCB(FROM_HERE, 31, 0, true));
message_loop_.Run();
- text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 500000, true));
message_loop_.Run();
}
@@ -438,9 +459,11 @@ TEST_F(FFmpegDemuxerTest, Read_VideoPositiveStartTime) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
- video->Read(NewReadCB(FROM_HERE, 5636, video_start_time.InMicroseconds()));
+ video->Read(NewReadCB(FROM_HERE, 5636, video_start_time.InMicroseconds(),
+ true));
message_loop_.Run();
- audio->Read(NewReadCB(FROM_HERE, 165, audio_start_time.InMicroseconds()));
+ audio->Read(NewReadCB(FROM_HERE, 165, audio_start_time.InMicroseconds(),
+ true));
message_loop_.Run();
// Verify that the start time is equal to the lowest timestamp (ie the
@@ -466,7 +489,7 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNoStartTime) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
demuxer_->GetStream(DemuxerStream::AUDIO)
- ->Read(NewReadCB(FROM_HERE, 4095, 0));
+ ->Read(NewReadCB(FROM_HERE, 4095, 0, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta(), demuxer_->start_time());
@@ -478,8 +501,9 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNoStartTime) {
}
// TODO(dalecurtis): Test is disabled since FFmpeg does not currently guarantee
-// the order of demuxed packets in OGG containers. Re-enable once we decide to
-// either workaround it or attempt a fix upstream. See http://crbug.com/387996.
+// the order of demuxed packets in OGG containers. Re-enable and fix key frame
+// expectations once we decide to either workaround it or attempt a fix
+// upstream. See http://crbug.com/387996.
TEST_F(FFmpegDemuxerTest,
DISABLED_Read_AudioNegativeStartTimeAndOggDiscard_Bear) {
// Many ogg files have negative starting timestamps, so ensure demuxing and
@@ -494,27 +518,29 @@ TEST_F(FFmpegDemuxerTest,
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
audio->Read(
- NewReadCBWithCheckedDiscard(FROM_HERE, 40, 0, kInfiniteDuration()));
+ NewReadCBWithCheckedDiscard(FROM_HERE, 40, 0, kInfiniteDuration(),
+ true));
message_loop_.Run();
audio->Read(
- NewReadCBWithCheckedDiscard(FROM_HERE, 41, 2903, kInfiniteDuration()));
+ NewReadCBWithCheckedDiscard(FROM_HERE, 41, 2903, kInfiniteDuration(),
+ true));
message_loop_.Run();
audio->Read(NewReadCBWithCheckedDiscard(
- FROM_HERE, 173, 5805, base::TimeDelta::FromMicroseconds(10159)));
+ FROM_HERE, 173, 5805, base::TimeDelta::FromMicroseconds(10159), true));
message_loop_.Run();
- audio->Read(NewReadCB(FROM_HERE, 148, 18866));
+ audio->Read(NewReadCB(FROM_HERE, 148, 18866, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-15964),
demuxer_->start_time());
- video->Read(NewReadCB(FROM_HERE, 5751, 0));
+ video->Read(NewReadCB(FROM_HERE, 5751, 0, true));
message_loop_.Run();
- video->Read(NewReadCB(FROM_HERE, 846, 33367));
+ video->Read(NewReadCB(FROM_HERE, 846, 33367, true));
message_loop_.Run();
- video->Read(NewReadCB(FROM_HERE, 1255, 66733));
+ video->Read(NewReadCB(FROM_HERE, 1255, 66733, true));
message_loop_.Run();
// Seek back to the beginning and repeat the test.
@@ -540,10 +566,10 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
audio->Read(NewReadCBWithCheckedDiscard(
- FROM_HERE, 1, 0, base::TimeDelta::FromMicroseconds(2902)));
+ FROM_HERE, 1, 0, base::TimeDelta::FromMicroseconds(2902), true));
message_loop_.Run();
- audio->Read(NewReadCB(FROM_HERE, 1, 2902));
+ audio->Read(NewReadCB(FROM_HERE, 1, 2902, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-2902),
demuxer_->start_time());
@@ -552,13 +578,13 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
// must always be greater than zero.
EXPECT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
- video->Read(NewReadCB(FROM_HERE, 9997, 0));
+ video->Read(NewReadCB(FROM_HERE, 9997, 0, true));
message_loop_.Run();
- video->Read(NewReadCB(FROM_HERE, 16, 33241));
+ video->Read(NewReadCB(FROM_HERE, 16, 33241, false));
message_loop_.Run();
- video->Read(NewReadCB(FROM_HERE, 631, 66482));
+ video->Read(NewReadCB(FROM_HERE, 631, 66482, false));
message_loop_.Run();
// Seek back to the beginning and repeat the test.
@@ -646,7 +672,7 @@ TEST_F(FFmpegDemuxerTest, Seek) {
ASSERT_TRUE(audio);
// Read a video packet and release it.
- video->Read(NewReadCB(FROM_HERE, 22084, 0));
+ video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -656,19 +682,19 @@ TEST_F(FFmpegDemuxerTest, Seek) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
- audio->Read(NewReadCB(FROM_HERE, 145, 803000));
+ audio->Read(NewReadCB(FROM_HERE, 145, 803000, true));
message_loop_.Run();
// Audio read #2.
- audio->Read(NewReadCB(FROM_HERE, 148, 826000));
+ audio->Read(NewReadCB(FROM_HERE, 148, 826000, true));
message_loop_.Run();
// Video read #1.
- video->Read(NewReadCB(FROM_HERE, 5425, 801000));
+ video->Read(NewReadCB(FROM_HERE, 5425, 801000, true));
message_loop_.Run();
// Video read #2.
- video->Read(NewReadCB(FROM_HERE, 1906, 834000));
+ video->Read(NewReadCB(FROM_HERE, 1906, 834000, false));
message_loop_.Run();
}
@@ -690,7 +716,7 @@ TEST_F(FFmpegDemuxerTest, SeekText) {
ASSERT_TRUE(audio);
// Read a text packet and release it.
- text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
+ text_stream->Read(NewReadCB(FROM_HERE, 31, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -700,27 +726,27 @@ TEST_F(FFmpegDemuxerTest, SeekText) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
- audio->Read(NewReadCB(FROM_HERE, 145, 803000));
+ audio->Read(NewReadCB(FROM_HERE, 145, 803000, true));
message_loop_.Run();
// Audio read #2.
- audio->Read(NewReadCB(FROM_HERE, 148, 826000));
+ audio->Read(NewReadCB(FROM_HERE, 148, 826000, true));
message_loop_.Run();
// Video read #1.
- video->Read(NewReadCB(FROM_HERE, 5425, 801000));
+ video->Read(NewReadCB(FROM_HERE, 5425, 801000, true));
message_loop_.Run();
// Video read #2.
- video->Read(NewReadCB(FROM_HERE, 1906, 834000));
+ video->Read(NewReadCB(FROM_HERE, 1906, 834000, false));
message_loop_.Run();
// Text read #1.
- text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 500000, true));
message_loop_.Run();
// Text read #2.
- text_stream->Read(NewReadCB(FROM_HERE, 19, 1000000));
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 1000000, true));
message_loop_.Run();
}
@@ -772,7 +798,7 @@ TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
ASSERT_TRUE(audio);
// Read a video packet and release it.
- video->Read(NewReadCB(FROM_HERE, 22084, 0));
+ video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -782,19 +808,19 @@ TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
- audio->Read(NewReadCB(FROM_HERE, 40, 2403000));
+ audio->Read(NewReadCB(FROM_HERE, 40, 2403000, true));
message_loop_.Run();
// Audio read #2.
- audio->Read(NewReadCB(FROM_HERE, 42, 2406000));
+ audio->Read(NewReadCB(FROM_HERE, 42, 2406000, true));
message_loop_.Run();
// Video read #1.
- video->Read(NewReadCB(FROM_HERE, 5276, 2402000));
+ video->Read(NewReadCB(FROM_HERE, 5276, 2402000, true));
message_loop_.Run();
// Video read #2.
- video->Read(NewReadCB(FROM_HERE, 1740, 2436000));
+ video->Read(NewReadCB(FROM_HERE, 1740, 2436000, false));
message_loop_.Run();
}
diff --git a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc
index e1900e8a135..db430e2b243 100644
--- a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc
+++ b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc
@@ -267,8 +267,6 @@ class FFmpegH264ToAnnexBBitstreamConverterTest : public testing::Test {
test_context_.extradata_size = sizeof(kHeaderDataOkWithFieldLen4);
}
- virtual ~FFmpegH264ToAnnexBBitstreamConverterTest() {}
-
void CreatePacket(AVPacket* packet, const uint8* data, uint32 data_size) {
// Create new packet sized of |data_size| from |data|.
EXPECT_EQ(av_new_packet(packet, data_size), 0);
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index 93dcee2157a..95daf52c64f 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -18,7 +18,6 @@
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
-#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -44,7 +43,7 @@ static int GetThreadCount(AVCodecID codec_id) {
// Refer to http://crbug.com/93932 for tsan suppressions on decoding.
int decode_threads = kDecodeThreads;
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
if (threads.empty() || !base::StringToInt(threads, &decode_threads))
return decode_threads;
@@ -86,10 +85,16 @@ int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
// updated width/height/pix_fmt, which can change for adaptive
// content.
VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
+ if (format == VideoFrame::YV12 &&
+ codec_context->colorspace == AVCOL_SPC_BT709) {
+ format = VideoFrame::YV12HD;
+ }
+
if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
- format == VideoFrame::YV12J || format == VideoFrame::YV24);
+ format == VideoFrame::YV12J || format == VideoFrame::YV24 ||
+ format == VideoFrame::YV12HD);
gfx::Size size(codec_context->width, codec_context->height);
const int ret = av_image_check_size(size.width(), size.height(), 0, NULL);
@@ -234,6 +239,8 @@ void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
if (buffer->end_of_stream())
state_ = kDecodeFinished;
+ // VideoDecoderShim expects that |decode_cb| is called only after
+ // |output_cb_|.
decode_cb_bound.Run(kOk);
}
diff --git a/chromium/media/filters/frame_processor.cc b/chromium/media/filters/frame_processor.cc
index ee49ba38bfe..0528ff7c806 100644
--- a/chromium/media/filters/frame_processor.cc
+++ b/chromium/media/filters/frame_processor.cc
@@ -332,7 +332,7 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
const scoped_refptr<StreamParserBuffer>& buffer) {
DCHECK(buffer->duration() > base::TimeDelta());
DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
- DCHECK(buffer->IsKeyframe());
+ DCHECK(buffer->is_key_frame());
const base::TimeDelta frame_end_timestamp =
buffer->timestamp() + buffer->duration();
@@ -446,7 +446,7 @@ bool FrameProcessor::ProcessFrame(
<< ", PTS=" << presentation_timestamp.InSecondsF()
<< ", DTS=" << decode_timestamp.InSecondsF()
<< ", DUR=" << frame_duration.InSecondsF()
- << ", RAP=" << frame->IsKeyframe();
+ << ", RAP=" << frame->is_key_frame();
// Sanity check the timestamps.
if (presentation_timestamp == kNoTimestamp()) {
@@ -638,7 +638,7 @@ bool FrameProcessor::ProcessFrame(
// 12.1. If the coded frame is not a random access point, then drop the
// coded frame and jump to the top of the loop to start processing
// the next coded frame.
- if (!frame->IsKeyframe()) {
+ if (!frame->is_key_frame()) {
DVLOG(3) << __FUNCTION__
<< ": Dropping frame that is not a random access point";
return true;
diff --git a/chromium/media/filters/frame_processor_unittest.cc b/chromium/media/filters/frame_processor_unittest.cc
index ff2f16c4a58..f90bfdbcfa3 100644
--- a/chromium/media/filters/frame_processor_unittest.cc
+++ b/chromium/media/filters/frame_processor_unittest.cc
@@ -11,6 +11,7 @@
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
+#include "media/base/media_log.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/filters/chunk_demuxer.h"
@@ -27,8 +28,6 @@ typedef StreamParser::BufferQueue BufferQueue;
typedef StreamParser::TextBufferQueueMap TextBufferQueueMap;
typedef StreamParser::TrackId TrackId;
-static void LogFunc(const std::string& str) { DVLOG(1) << str; }
-
// Used for setting expectations on callbacks. Using a StrictMock also lets us
// test for missing or extra callbacks.
class FrameProcessorTestCallbackHelper {
@@ -266,7 +265,8 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
switch (type) {
case DemuxerStream::AUDIO: {
ASSERT_FALSE(audio_);
- audio_.reset(new ChunkDemuxerStream(DemuxerStream::AUDIO, true));
+ audio_.reset(new ChunkDemuxerStream(
+ DemuxerStream::AUDIO, DemuxerStream::LIVENESS_UNKNOWN, true));
AudioDecoderConfig decoder_config(kCodecVorbis,
kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO,
@@ -275,15 +275,16 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
0,
false);
frame_processor_->OnPossibleAudioConfigUpdate(decoder_config);
- ASSERT_TRUE(
- audio_->UpdateAudioConfig(decoder_config, base::Bind(&LogFunc)));
+ ASSERT_TRUE(audio_->UpdateAudioConfig(decoder_config,
+ base::Bind(&AddLogEntryForTest)));
break;
}
case DemuxerStream::VIDEO: {
ASSERT_FALSE(video_);
- video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO, true));
+ video_.reset(new ChunkDemuxerStream(
+ DemuxerStream::VIDEO, DemuxerStream::LIVENESS_UNKNOWN, true));
ASSERT_TRUE(video_->UpdateVideoConfig(TestVideoConfig::Normal(),
- base::Bind(&LogFunc)));
+ base::Bind(&AddLogEntryForTest)));
break;
}
// TODO(wolenetz): Test text coded frame processing.
diff --git a/chromium/media/filters/gpu_video_accelerator_factories.cc b/chromium/media/filters/gpu_video_accelerator_factories.cc
deleted file mode 100644
index f9f56604d25..00000000000
--- a/chromium/media/filters/gpu_video_accelerator_factories.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/gpu_video_accelerator_factories.h"
-
-namespace media {
-
-GpuVideoAcceleratorFactories::~GpuVideoAcceleratorFactories() {}
-
-} // namespace media
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 5ca6c67d9b5..7211655dfbd 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -13,20 +13,20 @@
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
-#include "base/synchronization/waitable_event.h"
#include "base/task_runner_util.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
-#include "media/base/pipeline_status.h"
#include "media/base/video_decoder_config.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
#include "third_party/skia/include/core/SkBitmap.h"
namespace media {
+const char GpuVideoDecoder::kDecoderName[] = "GpuVideoDecoder";
+
// Maximum number of concurrent VDA::Decode() operations GVD will maintain.
// Higher values allow better pipelining in the GPU, but also require more
// resources.
@@ -36,8 +36,9 @@ enum { kMaxInFlightDecodes = 4 };
// be on the beefy side.
static const size_t kSharedMemorySegmentBytes = 100 << 10;
-GpuVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* m, size_t s)
- : shm(m), size(s) {
+GpuVideoDecoder::SHMBuffer::SHMBuffer(scoped_ptr<base::SharedMemory> m,
+ size_t s)
+ : shm(m.Pass()), size(s) {
}
GpuVideoDecoder::SHMBuffer::~SHMBuffer() {}
@@ -95,33 +96,13 @@ void GpuVideoDecoder::Reset(const base::Closure& closure) {
vda_->Reset();
}
-static bool IsCodedSizeSupported(const gfx::Size& coded_size) {
-#if defined(OS_WIN)
- // Windows Media Foundation H.264 decoding does not support decoding videos
- // with any dimension smaller than 48 pixels:
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
- if (coded_size.width() < 48 || coded_size.height() < 48)
- return false;
-#endif
-
- // Only non-Windows, Ivy Bridge+ platforms can support more than 1920x1080.
- // We test against 1088 to account for 16x16 macroblocks.
- if (coded_size.width() <= 1920 && coded_size.height() <= 1088)
- return true;
-
- // NOTE: additional autodetection logic may require updating input buffer size
- // selection in platform-specific implementations, such as
- // V4L2VideoDecodeAccelerator.
- base::CPU cpu;
- bool hw_large_video_support =
- CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kIgnoreResolutionLimitsForAcceleratedVideoDecode) ||
- ((cpu.vendor_name() == "GenuineIntel") && cpu.model() >= 55);
- bool os_large_video_support = true;
-#if defined(OS_WIN)
- os_large_video_support = false;
-#endif
- return os_large_video_support && hw_large_video_support;
+static bool IsCodedSizeSupported(const gfx::Size& coded_size,
+ const gfx::Size& min_resolution,
+ const gfx::Size& max_resolution) {
+ return (coded_size.width() <= max_resolution.width() &&
+ coded_size.height() <= max_resolution.height() &&
+ coded_size.width() >= min_resolution.width() &&
+ coded_size.height() >= min_resolution.height());
}
// Report |status| to UMA and run |cb| with it. This is super-specific to the
@@ -136,7 +117,7 @@ static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
}
std::string GpuVideoDecoder::GetDisplayName() const {
- return "GpuVideoDecoder";
+ return kDecoderName;
}
void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
@@ -164,7 +145,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- if (!IsCodedSizeSupported(config.coded_size())) {
+ if (!IsProfileSupported(config.profile(), config.coded_size())) {
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
@@ -251,7 +232,7 @@ void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
}
size_t size = buffer->data_size();
- SHMBuffer* shm_buffer = GetSHM(size);
+ scoped_ptr<SHMBuffer> shm_buffer = GetSHM(size);
if (!shm_buffer) {
bound_decode_cb.Run(kDecodeError);
return;
@@ -263,9 +244,9 @@ void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
DCHECK(!ContainsKey(bitstream_buffers_in_decoder_, bitstream_buffer.id()));
- bitstream_buffers_in_decoder_.insert(
- std::make_pair(bitstream_buffer.id(),
- PendingDecoderBuffer(shm_buffer, buffer, decode_cb)));
+ bitstream_buffers_in_decoder_.insert(std::make_pair(
+ bitstream_buffer.id(),
+ PendingDecoderBuffer(shm_buffer.release(), buffer, decode_cb)));
DCHECK_LE(static_cast<int>(bitstream_buffers_in_decoder_.size()),
kMaxInFlightDecodes);
RecordBufferData(bitstream_buffer, *buffer.get());
@@ -382,33 +363,6 @@ void GpuVideoDecoder::DismissPictureBuffer(int32 id) {
// Postpone deletion until after it's returned to us.
}
-static void ReadPixelsSyncInner(
- const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
- uint32 texture_id,
- const gfx::Rect& visible_rect,
- const SkBitmap& pixels,
- base::WaitableEvent* event) {
- factories->ReadPixels(texture_id, visible_rect, pixels);
- event->Signal();
-}
-
-static void ReadPixelsSync(
- const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
- uint32 texture_id,
- const gfx::Rect& visible_rect,
- const SkBitmap& pixels) {
- base::WaitableEvent event(true, false);
- if (!factories->GetTaskRunner()->PostTask(FROM_HERE,
- base::Bind(&ReadPixelsSyncInner,
- factories,
- texture_id,
- visible_rect,
- pixels,
- &event)))
- return;
- event.Wait();
-}
-
void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DVLOG(3) << "PictureReady()";
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
@@ -445,18 +399,13 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DCHECK(decoder_texture_target_);
scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new gpu::MailboxHolder(
- pb.texture_mailbox(), decoder_texture_target_, 0 /* sync_point */)),
- BindToCurrentLoop(base::Bind(&GpuVideoDecoder::ReleaseMailbox,
- weak_factory_.GetWeakPtr(),
- factories_,
- picture.picture_buffer_id(),
- pb.texture_id())),
- pb.size(),
- visible_rect,
- natural_size,
- timestamp,
- base::Bind(&ReadPixelsSync, factories_, pb.texture_id(), visible_rect)));
+ gpu::MailboxHolder(pb.texture_mailbox(), decoder_texture_target_,
+ 0 /* sync_point */),
+ BindToCurrentLoop(base::Bind(
+ &GpuVideoDecoder::ReleaseMailbox, weak_factory_.GetWeakPtr(),
+ factories_, picture.picture_buffer_id(), pb.texture_id())),
+ pb.size(), visible_rect, natural_size, timestamp, picture.allow_overlay(),
+ true /* has_alpha */));
CHECK_GT(available_pictures_, 0);
--available_pictures_;
bool inserted =
@@ -523,25 +472,27 @@ void GpuVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) {
vda_->ReusePictureBuffer(picture_buffer_id);
}
-GpuVideoDecoder::SHMBuffer* GpuVideoDecoder::GetSHM(size_t min_size) {
+scoped_ptr<GpuVideoDecoder::SHMBuffer> GpuVideoDecoder::GetSHM(
+ size_t min_size) {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (available_shm_segments_.empty() ||
available_shm_segments_.back()->size < min_size) {
size_t size_to_allocate = std::max(min_size, kSharedMemorySegmentBytes);
- base::SharedMemory* shm = factories_->CreateSharedMemory(size_to_allocate);
+ scoped_ptr<base::SharedMemory> shm =
+ factories_->CreateSharedMemory(size_to_allocate);
// CreateSharedMemory() can return NULL during Shutdown.
if (!shm)
return NULL;
- return new SHMBuffer(shm, size_to_allocate);
+ return make_scoped_ptr(new SHMBuffer(shm.Pass(), size_to_allocate));
}
- SHMBuffer* ret = available_shm_segments_.back();
+ scoped_ptr<SHMBuffer> ret(available_shm_segments_.back());
available_shm_segments_.pop_back();
- return ret;
+ return ret.Pass();
}
-void GpuVideoDecoder::PutSHM(SHMBuffer* shm_buffer) {
+void GpuVideoDecoder::PutSHM(scoped_ptr<SHMBuffer> shm_buffer) {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- available_shm_segments_.push_back(shm_buffer);
+ available_shm_segments_.push_back(shm_buffer.release());
}
void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
@@ -556,23 +507,20 @@ void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
return;
}
- PutSHM(it->second.shm_buffer);
+ PutSHM(make_scoped_ptr(it->second.shm_buffer));
it->second.done_cb.Run(state_ == kError ? kDecodeError : kOk);
bitstream_buffers_in_decoder_.erase(it);
}
GpuVideoDecoder::~GpuVideoDecoder() {
+ DVLOG(3) << __FUNCTION__;
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+
if (vda_)
DestroyVDA();
- DCHECK(bitstream_buffers_in_decoder_.empty());
DCHECK(assigned_picture_buffers_.empty());
- if (!pending_reset_cb_.is_null())
- base::ResetAndReturn(&pending_reset_cb_).Run();
-
for (size_t i = 0; i < available_shm_segments_.size(); ++i) {
- available_shm_segments_[i]->shm->Close();
delete available_shm_segments_[i];
}
available_shm_segments_.clear();
@@ -580,9 +528,13 @@ GpuVideoDecoder::~GpuVideoDecoder() {
for (std::map<int32, PendingDecoderBuffer>::iterator it =
bitstream_buffers_in_decoder_.begin();
it != bitstream_buffers_in_decoder_.end(); ++it) {
- it->second.shm_buffer->shm->Close();
+ delete it->second.shm_buffer;
+ it->second.done_cb.Run(kAborted);
}
bitstream_buffers_in_decoder_.clear();
+
+ if (!pending_reset_cb_.is_null())
+ base::ResetAndReturn(&pending_reset_cb_).Run();
}
void GpuVideoDecoder::NotifyFlushDone() {
@@ -617,6 +569,21 @@ void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
DestroyVDA();
}
+bool GpuVideoDecoder::IsProfileSupported(VideoCodecProfile profile,
+ const gfx::Size& coded_size) {
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ VideoDecodeAccelerator::SupportedProfiles supported_profiles =
+ factories_->GetVideoDecodeAcceleratorSupportedProfiles();
+ for (const auto& supported_profile : supported_profiles) {
+ if (profile == supported_profile.profile) {
+ return IsCodedSizeSupported(coded_size,
+ supported_profile.min_resolution,
+ supported_profile.max_resolution);
+ }
+ }
+ return false;
+}
+
void GpuVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent()
const {
DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread());
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index 2322d30e5eb..49ede615e2b 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -64,6 +64,8 @@ class MEDIA_EXPORT GpuVideoDecoder
void NotifyResetDone() override;
void NotifyError(media::VideoDecodeAccelerator::Error error) override;
+ static const char kDecoderName[];
+
protected:
~GpuVideoDecoder() override;
@@ -77,9 +79,9 @@ class MEDIA_EXPORT GpuVideoDecoder
// A shared memory segment and its allocated size.
struct SHMBuffer {
- SHMBuffer(base::SharedMemory* m, size_t s);
+ SHMBuffer(scoped_ptr<base::SharedMemory> m, size_t s);
~SHMBuffer();
- base::SharedMemory* shm;
+ scoped_ptr<base::SharedMemory> shm;
size_t size;
};
@@ -116,15 +118,19 @@ class MEDIA_EXPORT GpuVideoDecoder
void DestroyVDA();
// Request a shared-memory segment of at least |min_size| bytes. Will
- // allocate as necessary. Caller does not own returned pointer.
- SHMBuffer* GetSHM(size_t min_size);
+ // allocate as necessary.
+ scoped_ptr<SHMBuffer> GetSHM(size_t min_size);
// Return a shared-memory segment to the available pool.
- void PutSHM(SHMBuffer* shm_buffer);
+ void PutSHM(scoped_ptr<SHMBuffer> shm_buffer);
// Destroy all PictureBuffers in |buffers|, and delete their textures.
void DestroyPictureBuffers(PictureBufferMap* buffers);
+ // Returns true if the video decoder can support |profile| and |coded_size|.
+ bool IsProfileSupported(VideoCodecProfile profile,
+ const gfx::Size& coded_size);
+
// Assert the contract that this class is operated on the right thread.
void DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent() const;
diff --git a/chromium/media/filters/h264_bitstream_buffer.h b/chromium/media/filters/h264_bitstream_buffer.h
index 4b0511d9bcd..cbe2047452b 100644
--- a/chromium/media/filters/h264_bitstream_buffer.h
+++ b/chromium/media/filters/h264_bitstream_buffer.h
@@ -94,8 +94,8 @@ class MEDIA_EXPORT H264BitstreamBuffer {
kGrowBytes = 4096,
};
- COMPILE_ASSERT(kGrowBytes >= kRegByteSize,
- kGrowBytes_must_be_larger_than_kRegByteSize);
+ static_assert(kGrowBytes >= kRegByteSize,
+ "kGrowBytes must be larger than kRegByteSize");
// Unused bits left in reg_.
size_t bits_left_in_reg_;
diff --git a/chromium/media/filters/h264_parser.cc b/chromium/media/filters/h264_parser.cc
index fd5646d49ec..22d420bcc69 100644
--- a/chromium/media/filters/h264_parser.cc
+++ b/chromium/media/filters/h264_parser.cc
@@ -115,8 +115,8 @@ static const int kTableSarWidth[] = {
static const int kTableSarHeight[] = {
0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99, 3, 2, 1
};
-COMPILE_ASSERT(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
- sar_tables_must_have_same_size);
+static_assert(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
+ "sar tables must have the same size");
H264Parser::H264Parser() {
Reset();
@@ -1085,6 +1085,8 @@ H264Parser::Result H264Parser::ParsePredWeightTable(const H264SPS& sps,
}
H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
+ size_t bits_left_at_start = br_.NumBitsLeft();
+
if (shdr->idr_pic_flag) {
READ_BOOL_OR_RETURN(&shdr->no_output_of_prior_pics_flag);
READ_BOOL_OR_RETURN(&shdr->long_term_reference_flag);
@@ -1126,6 +1128,7 @@ H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
}
}
+ shdr->dec_ref_pic_marking_bit_size = bits_left_at_start - br_.NumBitsLeft();
return kOk;
}
@@ -1172,6 +1175,7 @@ H264Parser::Result H264Parser::ParseSliceHeader(const H264NALU& nalu,
if (shdr->idr_pic_flag)
READ_UE_OR_RETURN(&shdr->idr_pic_id);
+ size_t bits_left_at_pic_order_cnt_start = br_.NumBitsLeft();
if (sps->pic_order_cnt_type == 0) {
READ_BITS_OR_RETURN(sps->log2_max_pic_order_cnt_lsb_minus4 + 4,
&shdr->pic_order_cnt_lsb);
@@ -1181,12 +1185,15 @@ H264Parser::Result H264Parser::ParseSliceHeader(const H264NALU& nalu,
}
if (sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag) {
- READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt[0]);
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt0);
if (pps->bottom_field_pic_order_in_frame_present_flag &&
!shdr->field_pic_flag)
- READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt[1]);
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt1);
}
+ shdr->pic_order_cnt_bit_size =
+ bits_left_at_pic_order_cnt_start - br_.NumBitsLeft();
+
if (pps->redundant_pic_cnt_present_flag) {
READ_UE_OR_RETURN(&shdr->redundant_pic_cnt);
TRUE_OR_RETURN(shdr->redundant_pic_cnt < 128);
diff --git a/chromium/media/filters/h264_parser.h b/chromium/media/filters/h264_parser.h
index e248db73e57..b8dde5028ca 100644
--- a/chromium/media/filters/h264_parser.h
+++ b/chromium/media/filters/h264_parser.h
@@ -249,7 +249,8 @@ struct MEDIA_EXPORT H264SliceHeader {
int idr_pic_id;
int pic_order_cnt_lsb;
int delta_pic_order_cnt_bottom;
- int delta_pic_order_cnt[2];
+ int delta_pic_order_cnt0;
+ int delta_pic_order_cnt1;
int redundant_pic_cnt;
bool direct_spatial_mv_pred_flag;
@@ -285,6 +286,11 @@ struct MEDIA_EXPORT H264SliceHeader {
int disable_deblocking_filter_idc;
int slice_alpha_c0_offset_div2;
int slice_beta_offset_div2;
+
+ // Calculated.
+ // Size in bits of dec_ref_pic_marking() syntax element.
+ size_t dec_ref_pic_marking_bit_size;
+ size_t pic_order_cnt_bit_size;
};
struct H264SEIRecoveryPoint {
diff --git a/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc b/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
index 46f3d7b01a0..bbc829969e8 100644
--- a/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
+++ b/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
@@ -13,7 +13,7 @@ class H264ToAnnexBBitstreamConverterTest : public testing::Test {
protected:
H264ToAnnexBBitstreamConverterTest() {}
- virtual ~H264ToAnnexBBitstreamConverterTest() {}
+ ~H264ToAnnexBBitstreamConverterTest() override {}
protected:
mp4::AVCDecoderConfigurationRecord avc_config_;
diff --git a/chromium/media/filters/jpeg_parser.cc b/chromium/media/filters/jpeg_parser.cc
new file mode 100644
index 00000000000..ecf8f174e2a
--- /dev/null
+++ b/chromium/media/filters/jpeg_parser.cc
@@ -0,0 +1,395 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/jpeg_parser.h"
+
+#include "base/big_endian.h"
+#include "base/logging.h"
+
+using base::BigEndianReader;
+
+#define READ_U8_OR_RETURN_FALSE(out) \
+ do { \
+ uint8_t _out; \
+ if (!reader.ReadU8(&_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return false; \
+ } \
+ *(out) = _out; \
+ } while (0)
+
+#define READ_U16_OR_RETURN_FALSE(out) \
+ do { \
+ uint16_t _out; \
+ if (!reader.ReadU16(&_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return false; \
+ } \
+ *(out) = _out; \
+ } while (0)
+
+namespace media {
+
+namespace {
+enum JpegMarker {
+ SOF0 = 0xC0, // start of frame (baseline)
+ DHT = 0xC4, // define huffman table
+ SOI = 0xD8, // start of image
+ SOS = 0xDA, // start of scan
+ DQT = 0xDB, // define quantization table
+ DRI = 0xDD, // define restart internal
+ MARKER1 = 0xFF, // jpeg marker prefix
+};
+}
+
+static bool InRange(int value, int a, int b) {
+ return a <= value && value <= b;
+}
+
+// Round up |value| to multiple of |mul|. |value| must be non-negative.
+// |mul| must be positive.
+static int RoundUp(int value, int mul) {
+ DCHECK_GE(value, 0);
+ DCHECK_GE(mul, 1);
+ return (value + mul - 1) / mul * mul;
+}
+
+// |frame_header| is already initialized to 0 in ParseJpegPicture.
+static bool ParseSOF(const char* buffer,
+ size_t length,
+ JpegFrameHeader* frame_header) {
+ // Spec B.2.2 Frame header syntax
+ DCHECK(buffer);
+ DCHECK(frame_header);
+ BigEndianReader reader(buffer, length);
+
+ uint8_t precision;
+ READ_U8_OR_RETURN_FALSE(&precision);
+ READ_U16_OR_RETURN_FALSE(&frame_header->visible_height);
+ READ_U16_OR_RETURN_FALSE(&frame_header->visible_width);
+ READ_U8_OR_RETURN_FALSE(&frame_header->num_components);
+
+ if (precision != 8) {
+ DLOG(ERROR) << "Only support 8-bit precision, not "
+ << static_cast<int>(precision) << " bit for baseline";
+ return false;
+ }
+ if (!InRange(frame_header->num_components, 1,
+ arraysize(frame_header->components))) {
+ DLOG(ERROR) << "num_components="
+ << static_cast<int>(frame_header->num_components)
+ << " is not supported";
+ return false;
+ }
+
+ int max_h_factor = 0;
+ int max_v_factor = 0;
+ for (size_t i = 0; i < frame_header->num_components; i++) {
+ JpegComponent& component = frame_header->components[i];
+ READ_U8_OR_RETURN_FALSE(&component.id);
+ if (component.id > frame_header->num_components) {
+ DLOG(ERROR) << "component id (" << static_cast<int>(component.id)
+ << ") should be <= num_components ("
+ << static_cast<int>(frame_header->num_components) << ")";
+ return false;
+ }
+ uint8_t hv;
+ READ_U8_OR_RETURN_FALSE(&hv);
+ component.horizontal_sampling_factor = hv / 16;
+ component.vertical_sampling_factor = hv % 16;
+ if (component.horizontal_sampling_factor > max_h_factor)
+ max_h_factor = component.horizontal_sampling_factor;
+ if (component.vertical_sampling_factor > max_v_factor)
+ max_v_factor = component.vertical_sampling_factor;
+ if (!InRange(component.horizontal_sampling_factor, 1, 4)) {
+ DVLOG(1) << "Invalid horizontal sampling factor "
+ << static_cast<int>(component.horizontal_sampling_factor);
+ return false;
+ }
+ if (!InRange(component.vertical_sampling_factor, 1, 4)) {
+ DVLOG(1) << "Invalid vertical sampling factor "
+ << static_cast<int>(component.horizontal_sampling_factor);
+ return false;
+ }
+ READ_U8_OR_RETURN_FALSE(&component.quantization_table_selector);
+ }
+
+ // The size of data unit is 8*8 and the coded size should be extended
+ // to complete minimum coded unit, MCU. See Spec A.2.
+ frame_header->coded_width =
+ RoundUp(frame_header->visible_width, max_h_factor * 8);
+ frame_header->coded_height =
+ RoundUp(frame_header->visible_height, max_v_factor * 8);
+
+ return true;
+}
+
+// |q_table| is already initialized to 0 in ParseJpegPicture.
+static bool ParseDQT(const char* buffer,
+ size_t length,
+ JpegQuantizationTable* q_table) {
+ // Spec B.2.4.1 Quantization table-specification syntax
+ DCHECK(buffer);
+ DCHECK(q_table);
+ BigEndianReader reader(buffer, length);
+ while (reader.remaining() > 0) {
+ uint8_t precision_and_table_id;
+ READ_U8_OR_RETURN_FALSE(&precision_and_table_id);
+ uint8_t precision = precision_and_table_id / 16;
+ uint8_t table_id = precision_and_table_id % 16;
+ if (!InRange(precision, 0, 1)) {
+ DVLOG(1) << "Invalid precision " << static_cast<int>(precision);
+ return false;
+ }
+ if (precision == 1) { // 1 means 16-bit precision
+ DLOG(ERROR) << "An 8-bit DCT-based process shall not use a 16-bit "
+ << "precision quantization table";
+ return false;
+ }
+ if (table_id >= kJpegMaxQuantizationTableNum) {
+ DLOG(ERROR) << "Quantization table id (" << static_cast<int>(table_id)
+ << ") exceeded " << kJpegMaxQuantizationTableNum;
+ return false;
+ }
+
+ if (!reader.ReadBytes(&q_table[table_id].value,
+ sizeof(q_table[table_id].value)))
+ return false;
+ q_table[table_id].valid = true;
+ }
+ return true;
+}
+
+// |dc_table| and |ac_table| are already initialized to 0 in ParseJpegPicture.
+static bool ParseDHT(const char* buffer,
+ size_t length,
+ JpegHuffmanTable* dc_table,
+ JpegHuffmanTable* ac_table) {
+ // Spec B.2.4.2 Huffman table-specification syntax
+ DCHECK(buffer);
+ DCHECK(dc_table);
+ DCHECK(ac_table);
+ BigEndianReader reader(buffer, length);
+ while (reader.remaining() > 0) {
+ uint8_t table_class_and_id;
+ READ_U8_OR_RETURN_FALSE(&table_class_and_id);
+ int table_class = table_class_and_id / 16;
+ int table_id = table_class_and_id % 16;
+ if (!InRange(table_class, 0, 1)) {
+ DVLOG(1) << "Invalid table class " << table_class;
+ return false;
+ }
+ if (table_id >= 2) {
+ DLOG(ERROR) << "Table id(" << table_id
+ << ") >= 2 is invalid for baseline profile";
+ return false;
+ }
+
+ JpegHuffmanTable* table;
+ if (table_class == 1)
+ table = &ac_table[table_id];
+ else
+ table = &dc_table[table_id];
+
+ size_t count = 0;
+ if (!reader.ReadBytes(&table->code_length, sizeof(table->code_length)))
+ return false;
+ for (size_t i = 0; i < arraysize(table->code_length); i++)
+ count += table->code_length[i];
+
+ if (!InRange(count, 0, sizeof(table->code_value))) {
+ DVLOG(1) << "Invalid code count " << count;
+ return false;
+ }
+ if (!reader.ReadBytes(&table->code_value, count))
+ return false;
+ table->valid = true;
+ }
+ return true;
+}
+
+static bool ParseDRI(const char* buffer,
+ size_t length,
+ uint16_t* restart_interval) {
+ // Spec B.2.4.4 Restart interval definition syntax
+ DCHECK(buffer);
+ DCHECK(restart_interval);
+ BigEndianReader reader(buffer, length);
+ return reader.ReadU16(restart_interval) && reader.remaining() == 0;
+}
+
+// |scan| is already initialized to 0 in ParseJpegPicture.
+static bool ParseSOS(const char* buffer,
+ size_t length,
+ const JpegFrameHeader& frame_header,
+ JpegScanHeader* scan) {
+ // Spec B.2.3 Scan header syntax
+ DCHECK(buffer);
+ DCHECK(scan);
+ BigEndianReader reader(buffer, length);
+ READ_U8_OR_RETURN_FALSE(&scan->num_components);
+ if (scan->num_components != frame_header.num_components) {
+ DLOG(ERROR) << "The number of scan components ("
+ << static_cast<int>(scan->num_components)
+ << ") mismatches the number of image components ("
+ << static_cast<int>(frame_header.num_components) << ")";
+ return false;
+ }
+
+ for (int i = 0; i < scan->num_components; i++) {
+ JpegScanHeader::Component* component = &scan->components[i];
+ READ_U8_OR_RETURN_FALSE(&component->component_selector);
+ uint8_t dc_and_ac_selector;
+ READ_U8_OR_RETURN_FALSE(&dc_and_ac_selector);
+ component->dc_selector = dc_and_ac_selector / 16;
+ component->ac_selector = dc_and_ac_selector % 16;
+ if (component->component_selector != frame_header.components[i].id) {
+ DLOG(ERROR) << "component selector mismatches image component id";
+ return false;
+ }
+ if (component->dc_selector >= kJpegMaxHuffmanTableNumBaseline) {
+ DLOG(ERROR) << "DC selector (" << static_cast<int>(component->dc_selector)
+ << ") should be 0 or 1 for baseline mode";
+ return false;
+ }
+ if (component->ac_selector >= kJpegMaxHuffmanTableNumBaseline) {
+ DLOG(ERROR) << "AC selector (" << static_cast<int>(component->ac_selector)
+ << ") should be 0 or 1 for baseline mode";
+ return false;
+ }
+ }
+
+ // Unused fields, only for value checking.
+ uint8_t spectral_selection_start;
+ uint8_t spectral_selection_end;
+ uint8_t point_transform;
+ READ_U8_OR_RETURN_FALSE(&spectral_selection_start);
+ READ_U8_OR_RETURN_FALSE(&spectral_selection_end);
+ READ_U8_OR_RETURN_FALSE(&point_transform);
+ if (spectral_selection_start != 0 || spectral_selection_end != 63) {
+ DLOG(ERROR) << "Spectral selection should be 0,63 for baseline mode";
+ return false;
+ }
+ if (point_transform != 0) {
+ DLOG(ERROR) << "Point transform should be 0 for baseline mode";
+ return false;
+ }
+
+ return true;
+}
+
+// |result| is already initialized to 0 in ParseJpegPicture.
+static bool ParseSOI(const char* buffer,
+ size_t length,
+ JpegParseResult* result) {
+ // Spec B.2.1 High-level syntax
+ DCHECK(buffer);
+ DCHECK(result);
+ BigEndianReader reader(buffer, length);
+ uint8_t marker1;
+ uint8_t marker2;
+ bool has_marker_dqt = false;
+ bool has_marker_sos = false;
+
+ // Once reached SOS, all neccesary data are parsed.
+ while (!has_marker_sos) {
+ READ_U8_OR_RETURN_FALSE(&marker1);
+ if (marker1 != MARKER1)
+ return false;
+
+ do {
+ READ_U8_OR_RETURN_FALSE(&marker2);
+ } while (marker2 == MARKER1); // skip fill bytes
+
+ uint16_t size;
+ READ_U16_OR_RETURN_FALSE(&size);
+ if (reader.remaining() < size) {
+ DLOG(ERROR) << "Ill-formed JPEG. Remaining size (" << reader.remaining()
+ << ") is smaller than header specified (" << size << ")";
+ return false;
+ }
+
+ // The size includes the size field itself.
+ if (size < sizeof(size)) {
+ DLOG(ERROR) << "Ill-formed JPEG. Segment size (" << size
+ << ") is smaller than size field (" << sizeof(size) << ")";
+ return false;
+ }
+ size -= sizeof(size);
+
+ switch (marker2) {
+ case SOF0:
+ if (!ParseSOF(reader.ptr(), size, &result->frame_header)) {
+ DLOG(ERROR) << "ParseSOF failed";
+ return false;
+ }
+ break;
+ case DQT:
+ if (!ParseDQT(reader.ptr(), size, result->q_table)) {
+ DLOG(ERROR) << "ParseDQT failed";
+ return false;
+ }
+ has_marker_dqt = true;
+ break;
+ case DHT:
+ if (!ParseDHT(reader.ptr(), size, result->dc_table, result->ac_table)) {
+ DLOG(ERROR) << "ParseDHT failed";
+ return false;
+ }
+ break;
+ case DRI:
+ if (!ParseDRI(reader.ptr(), size, &result->restart_interval)) {
+ DLOG(ERROR) << "ParseDRI failed";
+ return false;
+ }
+ break;
+ case SOS:
+ if (!ParseSOS(reader.ptr(), size, result->frame_header,
+ &result->scan)) {
+ DLOG(ERROR) << "ParseSOS failed";
+ return false;
+ }
+ has_marker_sos = true;
+ break;
+ default:
+ DVLOG(4) << "unknown marker " << static_cast<int>(marker2);
+ break;
+ }
+ reader.Skip(size);
+ }
+
+ if (!has_marker_dqt) {
+ DLOG(ERROR) << "No DQT marker found";
+ return false;
+ }
+
+ // Scan data follows scan header immediately.
+ result->data = reader.ptr();
+ result->data_size = reader.remaining();
+
+ return true;
+}
+
+bool ParseJpegPicture(const uint8_t* buffer,
+ size_t length,
+ JpegParseResult* result) {
+ DCHECK(buffer);
+ DCHECK(result);
+ BigEndianReader reader(reinterpret_cast<const char*>(buffer), length);
+ memset(result, 0, sizeof(JpegParseResult));
+
+ uint8_t marker1, marker2;
+ READ_U8_OR_RETURN_FALSE(&marker1);
+ READ_U8_OR_RETURN_FALSE(&marker2);
+ if (marker1 != MARKER1 || marker2 != SOI) {
+ DLOG(ERROR) << "Not a JPEG";
+ return false;
+ }
+
+ return ParseSOI(reader.ptr(), reader.remaining(), result);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/jpeg_parser.h b/chromium/media/filters/jpeg_parser.h
new file mode 100644
index 00000000000..f27fa582d2d
--- /dev/null
+++ b/chromium/media/filters/jpeg_parser.h
@@ -0,0 +1,83 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef MEDIA_FILTERS_JPEG_PARSER_H_
+#define MEDIA_FILTERS_JPEG_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include "media/base/media_export.h"
+
+namespace media {
+
+const size_t kJpegMaxHuffmanTableNumBaseline = 2;
+const size_t kJpegMaxComponents = 4;
+const size_t kJpegMaxQuantizationTableNum = 4;
+
+// Parsing result of JPEG DHT marker.
+struct JpegHuffmanTable {
+ bool valid;
+ uint8_t code_length[16];
+ uint8_t code_value[256];
+};
+
+// Parsing result of JPEG DQT marker.
+struct JpegQuantizationTable {
+ bool valid;
+ uint8_t value[64]; // baseline only supports 8 bits quantization table
+};
+
+// Parsing result of a JPEG component.
+struct JpegComponent {
+ uint8_t id;
+ uint8_t horizontal_sampling_factor;
+ uint8_t vertical_sampling_factor;
+ uint8_t quantization_table_selector;
+};
+
+// Parsing result of a JPEG SOF marker.
+struct JpegFrameHeader {
+ uint16_t visible_width;
+ uint16_t visible_height;
+ uint16_t coded_width;
+ uint16_t coded_height;
+ uint8_t num_components;
+ JpegComponent components[kJpegMaxComponents];
+};
+
+// Parsing result of JPEG SOS marker.
+struct JpegScanHeader {
+ uint8_t num_components;
+ struct Component {
+ uint8_t component_selector;
+ uint8_t dc_selector;
+ uint8_t ac_selector;
+ } components[kJpegMaxComponents];
+};
+
+struct JpegParseResult {
+ JpegFrameHeader frame_header;
+ JpegHuffmanTable dc_table[kJpegMaxHuffmanTableNumBaseline];
+ JpegHuffmanTable ac_table[kJpegMaxHuffmanTableNumBaseline];
+ JpegQuantizationTable q_table[kJpegMaxQuantizationTableNum];
+ uint16_t restart_interval;
+ JpegScanHeader scan;
+ const char* data;
+ size_t data_size;
+};
+
+// Parses JPEG picture in |buffer| with |length|. Returns true iff header is
+// valid and JPEG baseline sequential process is present. If parsed
+// successfully, |result| is the parsed result.
+// It's not a full featured JPEG parser implememtation. It only parses JPEG
+// baseline sequential process. For explanations of each struct and its
+// members, see JPEG specification at
+// http://www.w3.org/Graphics/JPEG/itu-t81.pdf.
+MEDIA_EXPORT bool ParseJpegPicture(const uint8_t* buffer,
+ size_t length,
+ JpegParseResult* result);
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_JPEG_PARSER_H_
diff --git a/chromium/media/filters/jpeg_parser_unittest.cc b/chromium/media/filters/jpeg_parser_unittest.cc
new file mode 100644
index 00000000000..f2ae889a4a3
--- /dev/null
+++ b/chromium/media/filters/jpeg_parser_unittest.cc
@@ -0,0 +1,112 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/path_service.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/jpeg_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(JpegParserTest, Parsing) {
+ base::FilePath data_dir;
+ ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &data_dir));
+
+ // This sample frame is captured from Chromebook Pixel
+ base::FilePath file_path = data_dir.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII("pixel-1280x720.jpg");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ JpegParseResult result;
+ ASSERT_TRUE(ParseJpegPicture(stream.data(), stream.length(), &result));
+
+ // Verify selected fields
+
+ // SOF fields
+ EXPECT_EQ(1280, result.frame_header.visible_width);
+ EXPECT_EQ(720, result.frame_header.visible_height);
+ EXPECT_EQ(1280, result.frame_header.coded_width);
+ EXPECT_EQ(720, result.frame_header.coded_height);
+ EXPECT_EQ(3, result.frame_header.num_components);
+ EXPECT_EQ(1, result.frame_header.components[0].id);
+ EXPECT_EQ(2, result.frame_header.components[0].horizontal_sampling_factor);
+ EXPECT_EQ(1, result.frame_header.components[0].vertical_sampling_factor);
+ EXPECT_EQ(0, result.frame_header.components[0].quantization_table_selector);
+ EXPECT_EQ(2, result.frame_header.components[1].id);
+ EXPECT_EQ(1, result.frame_header.components[1].horizontal_sampling_factor);
+ EXPECT_EQ(1, result.frame_header.components[1].vertical_sampling_factor);
+ EXPECT_EQ(1, result.frame_header.components[1].quantization_table_selector);
+ EXPECT_EQ(3, result.frame_header.components[2].id);
+ EXPECT_EQ(1, result.frame_header.components[2].horizontal_sampling_factor);
+ EXPECT_EQ(1, result.frame_header.components[2].vertical_sampling_factor);
+ EXPECT_EQ(1, result.frame_header.components[2].quantization_table_selector);
+
+ // DRI fields
+ EXPECT_EQ(0, result.restart_interval);
+
+ // DQT fields
+ EXPECT_TRUE(result.q_table[0].valid);
+ EXPECT_TRUE(result.q_table[1].valid);
+ EXPECT_FALSE(result.q_table[2].valid);
+ EXPECT_FALSE(result.q_table[3].valid);
+
+ // DHT fields (no DHT marker)
+ EXPECT_FALSE(result.dc_table[0].valid);
+ EXPECT_FALSE(result.ac_table[0].valid);
+ EXPECT_FALSE(result.dc_table[1].valid);
+ EXPECT_FALSE(result.ac_table[1].valid);
+
+ // SOS fields
+ EXPECT_EQ(3, result.scan.num_components);
+ EXPECT_EQ(1, result.scan.components[0].component_selector);
+ EXPECT_EQ(0, result.scan.components[0].dc_selector);
+ EXPECT_EQ(0, result.scan.components[0].ac_selector);
+ EXPECT_EQ(2, result.scan.components[1].component_selector);
+ EXPECT_EQ(1, result.scan.components[1].dc_selector);
+ EXPECT_EQ(1, result.scan.components[1].ac_selector);
+ EXPECT_EQ(3, result.scan.components[2].component_selector);
+ EXPECT_EQ(1, result.scan.components[2].dc_selector);
+ EXPECT_EQ(1, result.scan.components[2].ac_selector);
+ EXPECT_EQ(121150u, result.data_size);
+}
+
+TEST(JpegParserTest, CodedSizeNotEqualVisibleSize) {
+ base::FilePath data_dir;
+ ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &data_dir));
+
+ base::FilePath file_path = data_dir.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII("blank-1x1.jpg");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ JpegParseResult result;
+ ASSERT_TRUE(ParseJpegPicture(stream.data(), stream.length(), &result));
+
+ EXPECT_EQ(1, result.frame_header.visible_width);
+ EXPECT_EQ(1, result.frame_header.visible_height);
+ // The sampling factor of the given image is 2:2, so coded size is 16x16
+ EXPECT_EQ(16, result.frame_header.coded_width);
+ EXPECT_EQ(16, result.frame_header.coded_height);
+ EXPECT_EQ(2, result.frame_header.components[0].horizontal_sampling_factor);
+ EXPECT_EQ(2, result.frame_header.components[0].vertical_sampling_factor);
+}
+
+TEST(JpegParserTest, ParsingFail) {
+ const uint8_t data[] = {0, 1, 2, 3}; // not jpeg
+ JpegParseResult result;
+ ASSERT_FALSE(ParseJpegPicture(data, sizeof(data), &result));
+}
+
+} // namespace media
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.cc b/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
deleted file mode 100644
index eeb3ba6dce0..00000000000
--- a/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/mock_gpu_video_accelerator_factories.h"
-
-namespace media {
-
-MockGpuVideoAcceleratorFactories::MockGpuVideoAcceleratorFactories() {}
-
-MockGpuVideoAcceleratorFactories::~MockGpuVideoAcceleratorFactories() {}
-
-scoped_ptr<VideoDecodeAccelerator>
-MockGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator() {
- return scoped_ptr<VideoDecodeAccelerator>(DoCreateVideoDecodeAccelerator());
-}
-
-scoped_ptr<VideoEncodeAccelerator>
-MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator() {
- return scoped_ptr<VideoEncodeAccelerator>(DoCreateVideoEncodeAccelerator());
-}
-
-} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_perftest.cc b/chromium/media/filters/pipeline_integration_perftest.cc
deleted file mode 100644
index d84bd136a08..00000000000
--- a/chromium/media/filters/pipeline_integration_perftest.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/test_data_util.h"
-#include "media/filters/pipeline_integration_test_base.h"
-#include "testing/perf/perf_test.h"
-
-namespace media {
-
-static const int kBenchmarkIterationsAudio = 200;
-static const int kBenchmarkIterationsVideo = 20;
-
-static void RunPlaybackBenchmark(const std::string& filename,
- const std::string& name,
- int iterations,
- bool audio_only) {
- double time_seconds = 0.0;
-
- for (int i = 0; i < iterations; ++i) {
- PipelineIntegrationTestBase pipeline;
-
- ASSERT_TRUE(pipeline.Start(GetTestDataFilePath(filename),
- PIPELINE_OK,
- PipelineIntegrationTestBase::kClockless));
-
- base::TimeTicks start = base::TimeTicks::HighResNow();
- pipeline.Play();
-
- ASSERT_TRUE(pipeline.WaitUntilOnEnded());
-
- // Call Stop() to ensure that the rendering is complete.
- pipeline.Stop();
-
- if (audio_only) {
- time_seconds += pipeline.GetAudioTime().InSecondsF();
- } else {
- time_seconds += (base::TimeTicks::HighResNow() - start).InSecondsF();
- }
- }
-
- perf_test::PrintResult(name,
- "",
- filename,
- iterations / time_seconds,
- "runs/s",
- true);
-}
-
-static void RunVideoPlaybackBenchmark(const std::string& filename,
- const std::string name) {
- RunPlaybackBenchmark(filename, name, kBenchmarkIterationsVideo, false);
-}
-
-static void RunAudioPlaybackBenchmark(const std::string& filename,
- const std::string& name) {
- RunPlaybackBenchmark(filename, name, kBenchmarkIterationsAudio, true);
-}
-
-TEST(PipelineIntegrationPerfTest, AudioPlaybackBenchmark) {
- RunAudioPlaybackBenchmark("sfx_f32le.wav", "clockless_playback");
- RunAudioPlaybackBenchmark("sfx_s24le.wav", "clockless_playback");
- RunAudioPlaybackBenchmark("sfx_s16le.wav", "clockless_playback");
- RunAudioPlaybackBenchmark("sfx_u8.wav", "clockless_playback");
-#if defined(USE_PROPRIETARY_CODECS)
- RunAudioPlaybackBenchmark("sfx.mp3", "clockless_playback");
-#endif
-}
-
-TEST(PipelineIntegrationPerfTest, VP8PlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear_silent.webm", "clockless_video_playback_vp8");
-}
-
-TEST(PipelineIntegrationPerfTest, VP9PlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear-vp9.webm", "clockless_video_playback_vp9");
-}
-
-TEST(PipelineIntegrationPerfTest, TheoraPlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear_silent.ogv",
- "clockless_video_playback_theora");
-}
-
-#if defined(USE_PROPRIETARY_CODECS)
-TEST(PipelineIntegrationPerfTest, MP4PlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear_silent.mp4", "clockless_video_playback_mp4");
-}
-#endif
-
-} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test.cc b/chromium/media/filters/pipeline_integration_test.cc
deleted file mode 100644
index bd026da87e9..00000000000
--- a/chromium/media/filters/pipeline_integration_test.cc
+++ /dev/null
@@ -1,1615 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/pipeline_integration_test_base.h"
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_util.h"
-#include "build/build_config.h"
-#include "media/base/cdm_callback_promise.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/media_keys.h"
-#include "media/base/media_switches.h"
-#include "media/base/test_data_util.h"
-#include "media/cdm/aes_decryptor.h"
-#include "media/cdm/json_web_key.h"
-#include "media/filters/chunk_demuxer.h"
-#include "media/filters/renderer_impl.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-using testing::_;
-using testing::AnyNumber;
-using testing::AtLeast;
-using testing::AtMost;
-using testing::SaveArg;
-
-namespace media {
-
-const char kSourceId[] = "SourceId";
-const char kCencInitDataType[] = "cenc";
-const uint8 kInitData[] = { 0x69, 0x6e, 0x69, 0x74 };
-
-const char kWebM[] = "video/webm; codecs=\"vp8,vorbis\"";
-const char kWebMVP9[] = "video/webm; codecs=\"vp9\"";
-const char kAudioOnlyWebM[] = "video/webm; codecs=\"vorbis\"";
-const char kOpusAudioOnlyWebM[] = "video/webm; codecs=\"opus\"";
-const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
-#if defined(USE_PROPRIETARY_CODECS)
-const char kADTS[] = "audio/aac";
-const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
-const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
-const char kMP4VideoAVC3[] = "video/mp4; codecs=\"avc3.64001f\"";
-const char kMP4Audio[] = "audio/mp4; codecs=\"mp4a.40.2\"";
-const char kMP3[] = "audio/mpeg";
-#endif // defined(USE_PROPRIETARY_CODECS)
-
-// Key used to encrypt test files.
-const uint8 kSecretKey[] = {
- 0xeb, 0xdd, 0x62, 0xf1, 0x68, 0x14, 0xd2, 0x7b,
- 0x68, 0xef, 0x12, 0x2a, 0xfc, 0xe4, 0xae, 0x3c
-};
-
-// The key ID for all encrypted files.
-const uint8 kKeyId[] = {
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
- 0x38, 0x39, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35
-};
-
-const int kAppendWholeFile = -1;
-
-// Constants for the Media Source config change tests.
-const int kAppendTimeSec = 1;
-const int kAppendTimeMs = kAppendTimeSec * 1000;
-const int k320WebMFileDurationMs = 2736;
-const int k640WebMFileDurationMs = 2749;
-const int kOpusEndTrimmingWebMFileDurationMs = 2741;
-const int kVP9WebMFileDurationMs = 2736;
-const int kVP8AWebMFileDurationMs = 2733;
-
-#if defined(USE_PROPRIETARY_CODECS)
-const int k640IsoFileDurationMs = 2737;
-const int k640IsoCencFileDurationMs = 2736;
-const int k1280IsoFileDurationMs = 2736;
-const int k1280IsoAVC3FileDurationMs = 2736;
-#endif // defined(USE_PROPRIETARY_CODECS)
-
-// Return a timeline offset for bear-320x240-live.webm.
-static base::Time kLiveTimelineOffset() {
- // The file contians the following UTC timeline offset:
- // 2012-11-10 12:34:56.789123456
- // Since base::Time only has a resolution of microseconds,
- // construct a base::Time for 2012-11-10 12:34:56.789123.
- base::Time::Exploded exploded_time;
- exploded_time.year = 2012;
- exploded_time.month = 11;
- exploded_time.day_of_month = 10;
- exploded_time.hour = 12;
- exploded_time.minute = 34;
- exploded_time.second = 56;
- exploded_time.millisecond = 789;
- base::Time timeline_offset = base::Time::FromUTCExploded(exploded_time);
-
- timeline_offset += base::TimeDelta::FromMicroseconds(123);
-
- return timeline_offset;
-}
-
-// FFmpeg only supports time a resolution of seconds so this
-// helper function truncates a base::Time to seconds resolution.
-static base::Time TruncateToFFmpegTimeResolution(base::Time t) {
- base::Time::Exploded exploded_time;
- t.UTCExplode(&exploded_time);
- exploded_time.millisecond = 0;
-
- return base::Time::FromUTCExploded(exploded_time);
-}
-
-// Note: Tests using this class only exercise the DecryptingDemuxerStream path.
-// They do not exercise the Decrypting{Audio|Video}Decoder path.
-class FakeEncryptedMedia {
- public:
- // Defines the behavior of the "app" that responds to EME events.
- class AppBase {
- public:
- virtual ~AppBase() {}
-
- virtual void OnSessionMessage(const std::string& web_session_id,
- const std::vector<uint8>& message,
- const GURL& destination_url) = 0;
-
- virtual void OnSessionClosed(const std::string& web_session_id) = 0;
-
- virtual void OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key) = 0;
-
- // Errors are not expected unless overridden.
- virtual void OnSessionError(const std::string& web_session_id,
- const std::string& error_name,
- uint32 system_code,
- const std::string& error_message) {
- FAIL() << "Unexpected Key Error";
- }
-
- virtual void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data,
- AesDecryptor* decryptor) = 0;
- };
-
- FakeEncryptedMedia(AppBase* app)
- : decryptor_(base::Bind(&FakeEncryptedMedia::OnSessionMessage,
- base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::OnSessionClosed,
- base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::OnSessionKeysChange,
- base::Unretained(this))),
- app_(app) {}
-
- AesDecryptor* decryptor() {
- return &decryptor_;
- }
-
- // Callbacks for firing session events. Delegate to |app_|.
- void OnSessionMessage(const std::string& web_session_id,
- const std::vector<uint8>& message,
- const GURL& destination_url) {
- app_->OnSessionMessage(web_session_id, message, destination_url);
- }
-
- void OnSessionClosed(const std::string& web_session_id) {
- app_->OnSessionClosed(web_session_id);
- }
-
- void OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key) {
- app_->OnSessionKeysChange(web_session_id, has_additional_usable_key);
- }
-
- void OnSessionError(const std::string& web_session_id,
- const std::string& error_name,
- uint32 system_code,
- const std::string& error_message) {
- app_->OnSessionError(
- web_session_id, error_name, system_code, error_message);
- }
-
- void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data) {
- app_->NeedKey(type, init_data, &decryptor_);
- }
-
- private:
- AesDecryptor decryptor_;
- scoped_ptr<AppBase> app_;
-};
-
-enum PromiseResult { RESOLVED, REJECTED };
-
-// Provides |kSecretKey| in response to needkey.
-class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
- public:
- KeyProvidingApp() {}
-
- void OnResolveWithSession(PromiseResult expected,
- const std::string& web_session_id) {
- EXPECT_EQ(expected, RESOLVED);
- EXPECT_GT(web_session_id.length(), 0ul);
- current_session_id_ = web_session_id;
- }
-
- void OnResolve(PromiseResult expected) {
- EXPECT_EQ(expected, RESOLVED);
- }
-
- void OnReject(PromiseResult expected,
- media::MediaKeys::Exception exception_code,
- uint32 system_code,
- const std::string& error_message) {
- EXPECT_EQ(expected, REJECTED);
- }
-
- scoped_ptr<SimpleCdmPromise> CreatePromise(PromiseResult expected) {
- scoped_ptr<media::SimpleCdmPromise> promise(new media::CdmCallbackPromise<>(
- base::Bind(
- &KeyProvidingApp::OnResolve, base::Unretained(this), expected),
- base::Bind(
- &KeyProvidingApp::OnReject, base::Unretained(this), expected)));
- return promise.Pass();
- }
-
- scoped_ptr<NewSessionCdmPromise> CreateSessionPromise(
- PromiseResult expected) {
- scoped_ptr<media::NewSessionCdmPromise> promise(
- new media::CdmCallbackPromise<std::string>(
- base::Bind(&KeyProvidingApp::OnResolveWithSession,
- base::Unretained(this),
- expected),
- base::Bind(
- &KeyProvidingApp::OnReject, base::Unretained(this), expected)));
- return promise.Pass();
- }
-
- void OnSessionMessage(const std::string& web_session_id,
- const std::vector<uint8>& message,
- const GURL& destination_url) override {
- EXPECT_FALSE(web_session_id.empty());
- EXPECT_FALSE(message.empty());
- EXPECT_EQ(current_session_id_, web_session_id);
- }
-
- void OnSessionClosed(const std::string& web_session_id) override {
- EXPECT_EQ(current_session_id_, web_session_id);
- }
-
- void OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key) override {
- EXPECT_EQ(current_session_id_, web_session_id);
- EXPECT_EQ(has_additional_usable_key, true);
- }
-
- void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data,
- AesDecryptor* decryptor) override {
- if (current_session_id_.empty()) {
- decryptor->CreateSession(type,
- kInitData,
- arraysize(kInitData),
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
- EXPECT_FALSE(current_session_id_.empty());
- }
-
- // Clear Key really needs the key ID in |init_data|. For WebM, they are the
- // same, but this is not the case for ISO CENC. Therefore, provide the
- // correct key ID.
- const uint8* key_id = init_data.empty() ? NULL : &init_data[0];
- size_t key_id_length = init_data.size();
- if (type == kCencInitDataType) {
- key_id = kKeyId;
- key_id_length = arraysize(kKeyId);
- }
-
- // Convert key into a JSON structure and then add it.
- std::string jwk = GenerateJWKSet(
- kSecretKey, arraysize(kSecretKey), key_id, key_id_length);
- decryptor->UpdateSession(current_session_id_,
- reinterpret_cast<const uint8*>(jwk.data()),
- jwk.size(),
- CreatePromise(RESOLVED));
- }
-
- std::string current_session_id_;
-};
-
-class RotatingKeyProvidingApp : public KeyProvidingApp {
- public:
- RotatingKeyProvidingApp() : num_distint_need_key_calls_(0) {}
- ~RotatingKeyProvidingApp() override {
- // Expect that NeedKey is fired multiple times with different |init_data|.
- EXPECT_GT(num_distint_need_key_calls_, 1u);
- }
-
- void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data,
- AesDecryptor* decryptor) override {
- // Skip the request if the |init_data| has been seen.
- if (init_data == prev_init_data_)
- return;
- prev_init_data_ = init_data;
- ++num_distint_need_key_calls_;
-
- decryptor->CreateSession(type,
- vector_as_array(&init_data),
- init_data.size(),
- MediaKeys::TEMPORARY_SESSION,
- CreateSessionPromise(RESOLVED));
-
- std::vector<uint8> key_id;
- std::vector<uint8> key;
- EXPECT_TRUE(GetKeyAndKeyId(init_data, &key, &key_id));
-
- // Convert key into a JSON structure and then add it.
- std::string jwk = GenerateJWKSet(vector_as_array(&key),
- key.size(),
- vector_as_array(&key_id),
- key_id.size());
- decryptor->UpdateSession(current_session_id_,
- reinterpret_cast<const uint8*>(jwk.data()),
- jwk.size(),
- CreatePromise(RESOLVED));
- }
-
- private:
- bool GetKeyAndKeyId(std::vector<uint8> init_data,
- std::vector<uint8>* key,
- std::vector<uint8>* key_id) {
- // For WebM, init_data is key_id; for ISO CENC, init_data should contain
- // the key_id. We assume key_id is in the end of init_data here (that is
- // only a reasonable assumption for WebM and clear key ISO CENC).
- DCHECK_GE(init_data.size(), arraysize(kKeyId));
- std::vector<uint8> key_id_from_init_data(
- init_data.end() - arraysize(kKeyId), init_data.end());
-
- key->assign(kSecretKey, kSecretKey + arraysize(kSecretKey));
- key_id->assign(kKeyId, kKeyId + arraysize(kKeyId));
-
- // The Key and KeyId for this testing key provider are created by left
- // rotating kSecretKey and kKeyId. Note that this implementation is only
- // intended for testing purpose. The actual key rotation algorithm can be
- // much more complicated.
- // Find out the rotating position from |key_id_from_init_data| and apply on
- // |key|.
- for (size_t pos = 0; pos < arraysize(kKeyId); ++pos) {
- std::rotate(key_id->begin(), key_id->begin() + pos, key_id->end());
- if (*key_id == key_id_from_init_data) {
- std::rotate(key->begin(), key->begin() + pos, key->end());
- return true;
- }
- }
- return false;
- }
-
- std::vector<uint8> prev_init_data_;
- uint32 num_distint_need_key_calls_;
-};
-
-// Ignores needkey and does not perform a license request
-class NoResponseApp : public FakeEncryptedMedia::AppBase {
- public:
- void OnSessionMessage(const std::string& web_session_id,
- const std::vector<uint8>& message,
- const GURL& default_url) override {
- EXPECT_FALSE(web_session_id.empty());
- EXPECT_FALSE(message.empty());
- FAIL() << "Unexpected Message";
- }
-
- void OnSessionClosed(const std::string& web_session_id) override {
- EXPECT_FALSE(web_session_id.empty());
- FAIL() << "Unexpected Closed";
- }
-
- void OnSessionKeysChange(const std::string& web_session_id,
- bool has_additional_usable_key) override {
- EXPECT_FALSE(web_session_id.empty());
- EXPECT_EQ(has_additional_usable_key, true);
- }
-
- void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data,
- AesDecryptor* decryptor) override {}
-};
-
-// Helper class that emulates calls made on the ChunkDemuxer by the
-// Media Source API.
-class MockMediaSource {
- public:
- MockMediaSource(const std::string& filename,
- const std::string& mimetype,
- int initial_append_size)
- : file_path_(GetTestDataFilePath(filename)),
- current_position_(0),
- initial_append_size_(initial_append_size),
- mimetype_(mimetype),
- chunk_demuxer_(new ChunkDemuxer(
- base::Bind(&MockMediaSource::DemuxerOpened, base::Unretained(this)),
- base::Bind(&MockMediaSource::DemuxerNeedKey,
- base::Unretained(this)),
- LogCB(),
- true)),
- owned_chunk_demuxer_(chunk_demuxer_) {
-
- file_data_ = ReadTestDataFile(filename);
-
- if (initial_append_size_ == kAppendWholeFile)
- initial_append_size_ = file_data_->data_size();
-
- DCHECK_GT(initial_append_size_, 0);
- DCHECK_LE(initial_append_size_, file_data_->data_size());
- }
-
- virtual ~MockMediaSource() {}
-
- scoped_ptr<Demuxer> GetDemuxer() { return owned_chunk_demuxer_.Pass(); }
-
- void set_need_key_cb(const Demuxer::NeedKeyCB& need_key_cb) {
- need_key_cb_ = need_key_cb;
- }
-
- void Seek(base::TimeDelta seek_time, int new_position, int seek_append_size) {
- chunk_demuxer_->StartWaitingForSeek(seek_time);
-
- chunk_demuxer_->Abort(
- kSourceId,
- base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_);
-
- DCHECK_GE(new_position, 0);
- DCHECK_LT(new_position, file_data_->data_size());
- current_position_ = new_position;
-
- AppendData(seek_append_size);
- }
-
- void AppendData(int size) {
- DCHECK(chunk_demuxer_);
- DCHECK_LT(current_position_, file_data_->data_size());
- DCHECK_LE(current_position_ + size, file_data_->data_size());
-
- chunk_demuxer_->AppendData(
- kSourceId, file_data_->data() + current_position_, size,
- base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_,
- base::Bind(&MockMediaSource::InitSegmentReceived,
- base::Unretained(this)));
- current_position_ += size;
- }
-
- void AppendAtTime(base::TimeDelta timestamp_offset,
- const uint8* pData,
- int size) {
- CHECK(!chunk_demuxer_->IsParsingMediaSegment(kSourceId));
- chunk_demuxer_->AppendData(kSourceId, pData, size,
- base::TimeDelta(), kInfiniteDuration(),
- &timestamp_offset,
- base::Bind(&MockMediaSource::InitSegmentReceived,
- base::Unretained(this)));
- last_timestamp_offset_ = timestamp_offset;
- }
-
- void AppendAtTimeWithWindow(base::TimeDelta timestamp_offset,
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- const uint8* pData,
- int size) {
- CHECK(!chunk_demuxer_->IsParsingMediaSegment(kSourceId));
- chunk_demuxer_->AppendData(kSourceId,
- pData,
- size,
- append_window_start,
- append_window_end,
- &timestamp_offset,
- base::Bind(&MockMediaSource::InitSegmentReceived,
- base::Unretained(this)));
- last_timestamp_offset_ = timestamp_offset;
- }
-
- void EndOfStream() {
- chunk_demuxer_->MarkEndOfStream(PIPELINE_OK);
- }
-
- void Abort() {
- if (!chunk_demuxer_)
- return;
- chunk_demuxer_->Shutdown();
- chunk_demuxer_ = NULL;
- }
-
- void DemuxerOpened() {
- base::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(&MockMediaSource::DemuxerOpenedTask,
- base::Unretained(this)));
- }
-
- void DemuxerOpenedTask() {
- // This code assumes that |mimetype_| is one of the following forms.
- // 1. audio/mpeg
- // 2. video/webm;codec="vorbis,vp8".
- size_t semicolon = mimetype_.find(";");
- std::string type = mimetype_;
- std::vector<std::string> codecs;
- if (semicolon != std::string::npos) {
- type = mimetype_.substr(0, semicolon);
- size_t codecs_param_start = mimetype_.find("codecs=\"", semicolon);
-
- CHECK_NE(codecs_param_start, std::string::npos);
-
- codecs_param_start += 8; // Skip over the codecs=".
-
- size_t codecs_param_end = mimetype_.find("\"", codecs_param_start);
-
- CHECK_NE(codecs_param_end, std::string::npos);
-
- std::string codecs_param =
- mimetype_.substr(codecs_param_start,
- codecs_param_end - codecs_param_start);
- Tokenize(codecs_param, ",", &codecs);
- }
-
- CHECK_EQ(chunk_demuxer_->AddId(kSourceId, type, codecs), ChunkDemuxer::kOk);
-
- AppendData(initial_append_size_);
- }
-
- void DemuxerNeedKey(const std::string& type,
- const std::vector<uint8>& init_data) {
- DCHECK(!init_data.empty());
- CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(type, init_data);
- }
-
- base::TimeDelta last_timestamp_offset() const {
- return last_timestamp_offset_;
- }
-
- MOCK_METHOD0(InitSegmentReceived, void(void));
-
- private:
- base::FilePath file_path_;
- scoped_refptr<DecoderBuffer> file_data_;
- int current_position_;
- int initial_append_size_;
- std::string mimetype_;
- ChunkDemuxer* chunk_demuxer_;
- scoped_ptr<Demuxer> owned_chunk_demuxer_;
- Demuxer::NeedKeyCB need_key_cb_;
- base::TimeDelta last_timestamp_offset_;
-};
-
-class PipelineIntegrationTest
- : public testing::Test,
- public PipelineIntegrationTestBase {
- public:
- void StartPipelineWithMediaSource(MockMediaSource* source) {
- EXPECT_CALL(*source, InitSegmentReceived()).Times(AtLeast(1));
- EXPECT_CALL(*this, OnMetadata(_))
- .Times(AtMost(1))
- .WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
- .Times(AtMost(1));
- demuxer_ = source->GetDemuxer().Pass();
- pipeline_->Start(
- demuxer_.get(),
- CreateRenderer(NULL),
- base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
- QuitOnStatusCB(PIPELINE_OK),
- base::Bind(&PipelineIntegrationTest::OnMetadata,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
- base::Unretained(this)),
- base::Closure(),
- base::Bind(&PipelineIntegrationTest::OnAddTextTrack,
- base::Unretained(this)));
- message_loop_.Run();
- }
-
- void StartHashedPipelineWithMediaSource(MockMediaSource* source) {
- hashing_enabled_ = true;
- StartPipelineWithMediaSource(source);
- }
-
- void StartPipelineWithEncryptedMedia(
- MockMediaSource* source,
- FakeEncryptedMedia* encrypted_media) {
- EXPECT_CALL(*source, InitSegmentReceived()).Times(AtLeast(1));
- EXPECT_CALL(*this, OnMetadata(_))
- .Times(AtMost(1))
- .WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
- .Times(AtMost(1));
- demuxer_ = source->GetDemuxer().Pass();
- pipeline_->Start(
- demuxer_.get(),
- CreateRenderer(encrypted_media->decryptor()),
- base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
- QuitOnStatusCB(PIPELINE_OK),
- base::Bind(&PipelineIntegrationTest::OnMetadata,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
- base::Unretained(this)),
- base::Closure(),
- base::Bind(&PipelineIntegrationTest::OnAddTextTrack,
- base::Unretained(this)));
-
- source->set_need_key_cb(base::Bind(&FakeEncryptedMedia::NeedKey,
- base::Unretained(encrypted_media)));
-
- message_loop_.Run();
- }
-
- // Verifies that seeking works properly for ChunkDemuxer when the
- // seek happens while there is a pending read on the ChunkDemuxer
- // and no data is available.
- bool TestSeekDuringRead(const std::string& filename,
- const std::string& mimetype,
- int initial_append_size,
- base::TimeDelta start_seek_time,
- base::TimeDelta seek_time,
- int seek_file_position,
- int seek_append_size) {
- MockMediaSource source(filename, mimetype, initial_append_size);
- StartPipelineWithMediaSource(&source);
-
- if (pipeline_status_ != PIPELINE_OK)
- return false;
-
- Play();
- if (!WaitUntilCurrentTimeIsAfter(start_seek_time))
- return false;
-
- source.Seek(seek_time, seek_file_position, seek_append_size);
- if (!Seek(seek_time))
- return false;
-
- source.EndOfStream();
-
- source.Abort();
- Stop();
- return true;
- }
-};
-
-TEST_F(PipelineIntegrationTest, BasicPlayback) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackOpusOgg) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus.ogg"), PIPELINE_OK));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
- ASSERT_TRUE(Start(
- GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK, kHashed));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- EXPECT_EQ("f0be120a90a811506777c99a2cdf7cc1", GetVideoHash());
- EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
- EXPECT_TRUE(demuxer_->GetTimelineOffset().is_null());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackLive) {
- ASSERT_TRUE(Start(
- GetTestDataFilePath("bear-320x240-live.webm"), PIPELINE_OK, kHashed));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- EXPECT_EQ("f0be120a90a811506777c99a2cdf7cc1", GetVideoHash());
- EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
-
- // TODO: Fix FFmpeg code to return higher resolution time values so
- // we don't have to truncate our expectations here.
- EXPECT_EQ(TruncateToFFmpegTimeResolution(kLiveTimelineOffset()),
- demuxer_->GetTimelineOffset());
-}
-
-TEST_F(PipelineIntegrationTest, F32PlaybackHashed) {
- ASSERT_TRUE(
- Start(GetTestDataFilePath("sfx_f32le.wav"), PIPELINE_OK, kHashed));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(std::string(kNullVideoHash), GetVideoHash());
- EXPECT_EQ("3.03,2.86,2.99,3.31,3.57,4.06,", GetAudioHash());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackEncrypted) {
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- set_need_key_cb(base::Bind(&FakeEncryptedMedia::NeedKey,
- base::Unretained(&encrypted_media)));
-
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240-av_enc-av.webm"),
- encrypted_media.decryptor()));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource) {
- MockMediaSource source("bear-320x240.webm", kWebM, 219229);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(k320WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- EXPECT_TRUE(demuxer_->GetTimelineOffset().is_null());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Live) {
- MockMediaSource source("bear-320x240-live.webm", kWebM, 219221);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(k320WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- EXPECT_EQ(kLiveTimelineOffset(),
- demuxer_->GetTimelineOffset());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP9_WebM) {
- MockMediaSource source("bear-vp9.webm", kWebMVP9, 67504);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kVP9WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
- MockMediaSource source("bear-vp8a.webm", kVideoOnlyWebM, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kVP8AWebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
- MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
- kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-// Flaky. http://crbug.com/304776
-TEST_F(PipelineIntegrationTest, DISABLED_MediaSource_Opus_Seeking_WebM) {
- MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
- kAppendWholeFile);
- StartHashedPipelineWithMediaSource(&source);
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- base::TimeDelta start_seek_time = base::TimeDelta::FromMilliseconds(1000);
- base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2000);
-
- Play();
- ASSERT_TRUE(WaitUntilCurrentTimeIsAfter(start_seek_time));
- source.Seek(seek_time, 0x1D5, 34017);
- source.EndOfStream();
- ASSERT_TRUE(Seek(seek_time));
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- EXPECT_EQ("0.76,0.20,-0.82,-0.58,-1.29,-0.29,", GetAudioHash());
-
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_WebM) {
- MockMediaSource source("bear-320x240-16x9-aspect.webm", kWebM,
- kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-640x360.webm");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kAppendTimeMs + k640WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_Encrypted_WebM) {
- MockMediaSource source("bear-320x240-16x9-aspect-av_enc-av.webm", kWebM,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-640x360-av_enc-av.webm");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kAppendTimeMs + k640WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-// Config changes from encrypted to clear are not currently supported.
-TEST_F(PipelineIntegrationTest,
- MediaSource_ConfigChange_ClearThenEncrypted_WebM) {
- MockMediaSource source("bear-320x240-16x9-aspect.webm", kWebM,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-640x360-av_enc-av.webm");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- message_loop_.Run();
- EXPECT_EQ(PIPELINE_ERROR_DECODE, pipeline_status_);
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- // The second video was not added, so its time has not been added.
- EXPECT_EQ(k320WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
- source.Abort();
-}
-
-// Config changes from clear to encrypted are not currently supported.
-TEST_F(PipelineIntegrationTest,
- MediaSource_ConfigChange_EncryptedThenClear_WebM) {
- MockMediaSource source("bear-320x240-16x9-aspect-av_enc-av.webm", kWebM,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-640x360.webm");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- // The second video was not added, so its time has not been added.
- EXPECT_EQ(k320WebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
- source.Abort();
-}
-
-#if defined(USE_PROPRIETARY_CODECS)
-TEST_F(PipelineIntegrationTest, MediaSource_ADTS) {
- MockMediaSource source("sfx.adts", kADTS, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(325, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_ADTS_TimestampOffset) {
- MockMediaSource source("sfx.adts", kADTS, kAppendWholeFile);
- StartHashedPipelineWithMediaSource(&source);
- EXPECT_EQ(325, source.last_timestamp_offset().InMilliseconds());
-
- // Trim off multiple frames off the beginning of the segment which will cause
- // the first decoded frame to be incorrect if preroll isn't implemented.
- const base::TimeDelta adts_preroll_duration =
- base::TimeDelta::FromSecondsD(2.5 * 1024 / 44100);
- const base::TimeDelta append_time =
- source.last_timestamp_offset() - adts_preroll_duration;
-
- scoped_refptr<DecoderBuffer> second_file = ReadTestDataFile("sfx.adts");
- source.AppendAtTimeWithWindow(append_time,
- append_time + adts_preroll_duration,
- kInfiniteDuration(),
- second_file->data(),
- second_file->data_size());
- source.EndOfStream();
-
- EXPECT_EQ(592, source.last_timestamp_offset().InMilliseconds());
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(592, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
-
- // Verify preroll is stripped.
- EXPECT_EQ("-0.06,0.97,-0.90,-0.70,-0.53,-0.34,", GetAudioHash());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackHashed_MP3) {
- ASSERT_TRUE(Start(GetTestDataFilePath("sfx.mp3"), PIPELINE_OK, kHashed));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- // Verify codec delay and preroll are stripped.
- EXPECT_EQ("3.05,2.87,3.00,3.32,3.58,4.08,", GetAudioHash());
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_MP3) {
- MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
- StartHashedPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(313, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
-
- // Verify that codec delay was stripped.
- EXPECT_EQ("1.01,2.71,4.18,4.32,3.04,1.12,", GetAudioHash());
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_MP3_TimestampOffset) {
- MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- EXPECT_EQ(313, source.last_timestamp_offset().InMilliseconds());
-
- // There are 576 silent frames at the start of this mp3. The second append
- // should trim them off.
- const base::TimeDelta mp3_preroll_duration =
- base::TimeDelta::FromSecondsD(576.0 / 44100);
- const base::TimeDelta append_time =
- source.last_timestamp_offset() - mp3_preroll_duration;
-
- scoped_refptr<DecoderBuffer> second_file = ReadTestDataFile("sfx.mp3");
- source.AppendAtTimeWithWindow(append_time,
- append_time + mp3_preroll_duration,
- kInfiniteDuration(),
- second_file->data(),
- second_file->data_size());
- source.EndOfStream();
-
- EXPECT_EQ(613, source.last_timestamp_offset().InMilliseconds());
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(613, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_MP3_Icecast) {
- MockMediaSource source("icy_sfx.mp3", kMP3, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_MP4) {
- MockMediaSource source("bear-640x360-av_frag.mp4", kMP4, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-1280x720-av_frag.mp4");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kAppendTimeMs + k1280IsoFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest,
- MediaSource_ConfigChange_Encrypted_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-640x360-v_frag-cenc.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-1280x720-v_frag-cenc.mp4");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kAppendTimeMs + k1280IsoFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest,
- MediaSource_ConfigChange_Encrypted_MP4_CENC_KeyRotation_VideoOnly) {
- MockMediaSource source("bear-640x360-v_frag-cenc-key_rotation.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-1280x720-v_frag-cenc-key_rotation.mp4");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kAppendTimeMs + k1280IsoFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-// Config changes from clear to encrypted are not currently supported.
-// TODO(ddorwin): Figure out why this CHECKs in AppendAtTime().
-TEST_F(PipelineIntegrationTest,
- DISABLED_MediaSource_ConfigChange_ClearThenEncrypted_MP4_CENC) {
- MockMediaSource source("bear-640x360-av_frag.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-1280x720-v_frag-cenc.mp4");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- message_loop_.Run();
- EXPECT_EQ(PIPELINE_ERROR_DECODE, pipeline_status_);
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- // The second video was not added, so its time has not been added.
- EXPECT_EQ(k640IsoFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
- source.Abort();
-}
-
-// Config changes from encrypted to clear are not currently supported.
-TEST_F(PipelineIntegrationTest,
- MediaSource_ConfigChange_EncryptedThenClear_MP4_CENC) {
- MockMediaSource source("bear-640x360-v_frag-cenc.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- scoped_refptr<DecoderBuffer> second_file =
- ReadTestDataFile("bear-1280x720-av_frag.mp4");
-
- source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
- second_file->data(), second_file->data_size());
-
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- // The second video was not added, so its time has not been added.
- EXPECT_EQ(k640IsoCencFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
- source.Abort();
-}
-
-// Verify files which change configuration midstream fail gracefully.
-TEST_F(PipelineIntegrationTest, MidStreamConfigChangesFail) {
- ASSERT_TRUE(Start(
- GetTestDataFilePath("midstream_config_change.mp3"), PIPELINE_OK));
- Play();
- ASSERT_EQ(WaitUntilEndedOrError(), PIPELINE_ERROR_DECODE);
-}
-
-#endif
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_16x9AspectRatio) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240-16x9-aspect.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_WebM) {
- MockMediaSource source("bear-320x240-av_enc-av.webm", kWebM, 219816);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_ClearStart_WebM) {
- MockMediaSource source("bear-320x240-av_enc-av_clear-1s.webm", kWebM,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_NoEncryptedFrames_WebM) {
- MockMediaSource source("bear-320x240-av_enc-av_clear-all.webm", kWebM,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new NoResponseApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-#if defined(USE_PROPRIETARY_CODECS)
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-1280x720-v_frag-cenc.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_AudioOnly) {
- MockMediaSource source("bear-1280x720-a_frag-cenc.mp4", kMP4Audio,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest,
- EncryptedPlayback_NoEncryptedFrames_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-1280x720-v_frag-cenc_clear-all.mp4", kMP4Video,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new NoResponseApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest,
- EncryptedPlayback_NoEncryptedFrames_MP4_CENC_AudioOnly) {
- MockMediaSource source("bear-1280x720-a_frag-cenc_clear-all.mp4", kMP4Audio,
- kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new NoResponseApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VideoOnly_MP4_AVC3) {
- MockMediaSource source("bear-1280x720-v_frag-avc3.mp4", kMP4VideoAVC3,
- kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
-
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(k1280IsoAVC3FileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_KeyRotation_Video) {
- MockMediaSource source("bear-1280x720-v_frag-cenc-key_rotation.mp4",
- kMP4Video, kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-
-TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_KeyRotation_Audio) {
- MockMediaSource source("bear-1280x720-a_frag-cenc-key_rotation.mp4",
- kMP4Audio, kAppendWholeFile);
- FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
- StartPipelineWithEncryptedMedia(&source, &encrypted_media);
-
- source.EndOfStream();
- ASSERT_EQ(PIPELINE_OK, pipeline_status_);
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
-}
-#endif
-
-// TODO(acolwell): Fix flakiness http://crbug.com/117921
-TEST_F(PipelineIntegrationTest, DISABLED_SeekWhilePaused) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK));
-
- base::TimeDelta duration(pipeline_->GetMediaDuration());
- base::TimeDelta start_seek_time(duration / 4);
- base::TimeDelta seek_time(duration * 3 / 4);
-
- Play();
- ASSERT_TRUE(WaitUntilCurrentTimeIsAfter(start_seek_time));
- Pause();
- ASSERT_TRUE(Seek(seek_time));
- EXPECT_EQ(pipeline_->GetMediaTime(), seek_time);
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-
- // Make sure seeking after reaching the end works as expected.
- Pause();
- ASSERT_TRUE(Seek(seek_time));
- EXPECT_EQ(pipeline_->GetMediaTime(), seek_time);
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// TODO(acolwell): Fix flakiness http://crbug.com/117921
-TEST_F(PipelineIntegrationTest, DISABLED_SeekWhilePlaying) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK));
-
- base::TimeDelta duration(pipeline_->GetMediaDuration());
- base::TimeDelta start_seek_time(duration / 4);
- base::TimeDelta seek_time(duration * 3 / 4);
-
- Play();
- ASSERT_TRUE(WaitUntilCurrentTimeIsAfter(start_seek_time));
- ASSERT_TRUE(Seek(seek_time));
- EXPECT_GE(pipeline_->GetMediaTime(), seek_time);
- ASSERT_TRUE(WaitUntilOnEnded());
-
- // Make sure seeking after reaching the end works as expected.
- ASSERT_TRUE(Seek(seek_time));
- EXPECT_GE(pipeline_->GetMediaTime(), seek_time);
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-#if defined(USE_PROPRIETARY_CODECS)
-TEST_F(PipelineIntegrationTest, Rotated_Metadata_0) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_0.mp4"), PIPELINE_OK));
- ASSERT_EQ(VIDEO_ROTATION_0, metadata_.video_rotation);
-}
-
-TEST_F(PipelineIntegrationTest, Rotated_Metadata_90) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_90.mp4"), PIPELINE_OK));
- ASSERT_EQ(VIDEO_ROTATION_90, metadata_.video_rotation);
-}
-
-TEST_F(PipelineIntegrationTest, Rotated_Metadata_180) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_180.mp4"), PIPELINE_OK));
- ASSERT_EQ(VIDEO_ROTATION_180, metadata_.video_rotation);
-}
-
-TEST_F(PipelineIntegrationTest, Rotated_Metadata_270) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_270.mp4"), PIPELINE_OK));
- ASSERT_EQ(VIDEO_ROTATION_270, metadata_.video_rotation);
-}
-#endif
-
-// Verify audio decoder & renderer can handle aborted demuxer reads.
-TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_AudioOnly) {
- ASSERT_TRUE(TestSeekDuringRead("bear-320x240-audio-only.webm", kAudioOnlyWebM,
- 8192,
- base::TimeDelta::FromMilliseconds(464),
- base::TimeDelta::FromMilliseconds(617),
- 0x10CA, 19730));
-}
-
-// Verify video decoder & renderer can handle aborted demuxer reads.
-TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_VideoOnly) {
- ASSERT_TRUE(TestSeekDuringRead("bear-320x240-video-only.webm", kVideoOnlyWebM,
- 32768,
- base::TimeDelta::FromMilliseconds(167),
- base::TimeDelta::FromMilliseconds(1668),
- 0x1C896, 65536));
-}
-
-// Verify that Opus audio in WebM containers can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_AudioOnly_Opus_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus-end-trimming.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that VP9 video in WebM containers can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VideoOnly_VP9_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that VP9 video and Opus audio in the same WebM container can be played
-// back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Opus_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9-opus.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that VP8 video with alpha channel can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(last_video_frame_format_, VideoFrame::YV12A);
-}
-
-// Verify that VP8A video with odd width/height can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_Odd_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a-odd-dimensions.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(last_video_frame_format_, VideoFrame::YV12A);
-}
-
-// Verify that VP9 video with odd width/height can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Odd_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9-odd-dimensions.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that VP8 video with inband text track can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP8_WebVTT_WebM) {
- EXPECT_CALL(*this, OnAddTextTrack(_, _));
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8-webvtt.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that VP9 video with 4:4:4 subsampling can be played back.
-TEST_F(PipelineIntegrationTest, P444_VP9_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240-P444.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(last_video_frame_format_, VideoFrame::YV24);
-}
-
-// Verify that videos with an odd frame size playback successfully.
-TEST_F(PipelineIntegrationTest, BasicPlayback_OddVideoSize) {
- ASSERT_TRUE(Start(GetTestDataFilePath("butterfly-853x480.webm"),
- PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Verify that OPUS audio in a webm which reports a 44.1kHz sample rate plays
-// correctly at 48kHz
-TEST_F(PipelineIntegrationTest, BasicPlayback_Opus441kHz) {
- ASSERT_TRUE(Start(GetTestDataFilePath("sfx-opus-441.webm"), PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(48000,
- demuxer_->GetStream(DemuxerStream::AUDIO)
- ->audio_decoder_config()
- .samples_per_second());
-}
-
-// Same as above but using MediaSource.
-TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus441kHz) {
- MockMediaSource source(
- "sfx-opus-441.webm", kOpusAudioOnlyWebM, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
- source.EndOfStream();
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- source.Abort();
- Stop();
- EXPECT_EQ(48000,
- demuxer_->GetStream(DemuxerStream::AUDIO)
- ->audio_decoder_config()
- .samples_per_second());
-}
-
-// Ensures audio-only playback with missing or negative timestamps works. Tests
-// the common live-streaming case for chained ogg. See http://crbug.com/396864.
-TEST_F(PipelineIntegrationTest, BasicPlaybackChainedOgg) {
- ASSERT_TRUE(Start(GetTestDataFilePath("double-sfx.ogg"), PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- ASSERT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
-}
-
-// Ensures audio-video playback with missing or negative timestamps fails softly
-// instead of crashing. See http://crbug.com/396864.
-TEST_F(PipelineIntegrationTest, BasicPlaybackChainedOggVideo) {
- ASSERT_TRUE(Start(GetTestDataFilePath("double-bear.ogv"), PIPELINE_OK));
- Play();
- EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
- ASSERT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
-}
-
-// Tests that we signal ended even when audio runs longer than video track.
-TEST_F(PipelineIntegrationTest, BasicPlaybackAudioLongerThanVideo) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_audio_longer_than_video.ogv"),
- PIPELINE_OK));
- // Audio track is 2000ms. Video track is 1001ms. Duration should be higher
- // of the two.
- EXPECT_EQ(2000, pipeline_->GetMediaDuration().InMilliseconds());
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-// Tests that we signal ended even when audio runs shorter than video track.
-TEST_F(PipelineIntegrationTest, BasicPlaybackAudioShorterThanVideo) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear_audio_shorter_than_video.ogv"),
- PIPELINE_OK));
- // Audio track is 500ms. Video track is 1001ms. Duration should be higher of
- // the two.
- EXPECT_EQ(1001, pipeline_->GetMediaDuration().InMilliseconds());
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
-}
-
-TEST_F(PipelineIntegrationTest, BasicPlaybackPositiveStartTime) {
- ASSERT_TRUE(
- Start(GetTestDataFilePath("nonzero-start-time.webm"), PIPELINE_OK));
- Play();
- ASSERT_TRUE(WaitUntilOnEnded());
- ASSERT_EQ(base::TimeDelta::FromMicroseconds(396000),
- demuxer_->GetStartTime());
-}
-
-} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test_base.cc b/chromium/media/filters/pipeline_integration_test_base.cc
deleted file mode 100644
index dcd506532d6..00000000000
--- a/chromium/media/filters/pipeline_integration_test_base.cc
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/pipeline_integration_test_base.h"
-
-#include "base/bind.h"
-#include "base/memory/scoped_vector.h"
-#include "media/base/media_log.h"
-#include "media/filters/audio_renderer_impl.h"
-#include "media/filters/chunk_demuxer.h"
-#include "media/filters/ffmpeg_audio_decoder.h"
-#include "media/filters/ffmpeg_demuxer.h"
-#include "media/filters/ffmpeg_video_decoder.h"
-#include "media/filters/file_data_source.h"
-#include "media/filters/opus_audio_decoder.h"
-#include "media/filters/renderer_impl.h"
-#include "media/filters/vpx_video_decoder.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::AtMost;
-using ::testing::InvokeWithoutArgs;
-using ::testing::SaveArg;
-
-namespace media {
-
-const char kNullVideoHash[] = "d41d8cd98f00b204e9800998ecf8427e";
-const char kNullAudioHash[] = "0.00,0.00,0.00,0.00,0.00,0.00,";
-
-PipelineIntegrationTestBase::PipelineIntegrationTestBase()
- : hashing_enabled_(false),
- clockless_playback_(false),
- pipeline_(
- new Pipeline(message_loop_.message_loop_proxy(), new MediaLog())),
- ended_(false),
- pipeline_status_(PIPELINE_OK),
- last_video_frame_format_(VideoFrame::UNKNOWN),
- hardware_config_(AudioParameters(), AudioParameters()) {
- base::MD5Init(&md5_context_);
-}
-
-PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
- if (!pipeline_->IsRunning())
- return;
-
- Stop();
-}
-
-void PipelineIntegrationTestBase::SaveStatus(PipelineStatus status) {
- pipeline_status_ = status;
-}
-
-void PipelineIntegrationTestBase::OnStatusCallback(
- PipelineStatus status) {
- pipeline_status_ = status;
- message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-void PipelineIntegrationTestBase::OnStatusCallbackChecked(
- PipelineStatus expected_status,
- PipelineStatus status) {
- EXPECT_EQ(expected_status, status);
- OnStatusCallback(status);
-}
-
-PipelineStatusCB PipelineIntegrationTestBase::QuitOnStatusCB(
- PipelineStatus expected_status) {
- return base::Bind(&PipelineIntegrationTestBase::OnStatusCallbackChecked,
- base::Unretained(this),
- expected_status);
-}
-
-void PipelineIntegrationTestBase::DemuxerNeedKeyCB(
- const std::string& type,
- const std::vector<uint8>& init_data) {
- DCHECK(!init_data.empty());
- CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(type, init_data);
-}
-
-void PipelineIntegrationTestBase::OnEnded() {
- DCHECK(!ended_);
- ended_ = true;
- pipeline_status_ = PIPELINE_OK;
- message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-bool PipelineIntegrationTestBase::WaitUntilOnEnded() {
- if (ended_)
- return (pipeline_status_ == PIPELINE_OK);
- message_loop_.Run();
- EXPECT_TRUE(ended_);
- return ended_ && (pipeline_status_ == PIPELINE_OK);
-}
-
-PipelineStatus PipelineIntegrationTestBase::WaitUntilEndedOrError() {
- if (ended_ || pipeline_status_ != PIPELINE_OK)
- return pipeline_status_;
- message_loop_.Run();
- return pipeline_status_;
-}
-
-void PipelineIntegrationTestBase::OnError(PipelineStatus status) {
- DCHECK_NE(status, PIPELINE_OK);
- pipeline_status_ = status;
- message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
- PipelineStatus expected_status) {
- EXPECT_CALL(*this, OnMetadata(_))
- .Times(AtMost(1))
- .WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
- .Times(AtMost(1));
- CreateDemuxer(file_path);
- pipeline_->Start(
- demuxer_.get(),
- CreateRenderer(NULL),
- base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
- QuitOnStatusCB(expected_status),
- base::Bind(&PipelineIntegrationTestBase::OnMetadata,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
- base::Unretained(this)),
- base::Closure(),
- base::Bind(&PipelineIntegrationTestBase::OnAddTextTrack,
- base::Unretained(this)));
- message_loop_.Run();
- return (pipeline_status_ == PIPELINE_OK);
-}
-
-bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
- PipelineStatus expected_status,
- kTestType test_type) {
- hashing_enabled_ = test_type == kHashed;
- clockless_playback_ = test_type == kClockless;
- return Start(file_path, expected_status);
-}
-
-bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path) {
- return Start(file_path, NULL);
-}
-
-bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
- Decryptor* decryptor) {
- EXPECT_CALL(*this, OnMetadata(_))
- .Times(AtMost(1))
- .WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
- .Times(AtMost(1));
-
- CreateDemuxer(file_path);
- pipeline_->Start(
- demuxer_.get(),
- CreateRenderer(decryptor),
- base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnStatusCallback,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnMetadata,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
- base::Unretained(this)),
- base::Closure(),
- base::Bind(&PipelineIntegrationTestBase::OnAddTextTrack,
- base::Unretained(this)));
- message_loop_.Run();
- return (pipeline_status_ == PIPELINE_OK);
-}
-
-void PipelineIntegrationTestBase::Play() {
- pipeline_->SetPlaybackRate(1);
-}
-
-void PipelineIntegrationTestBase::Pause() {
- pipeline_->SetPlaybackRate(0);
-}
-
-bool PipelineIntegrationTestBase::Seek(base::TimeDelta seek_time) {
- ended_ = false;
-
- EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
- .WillOnce(InvokeWithoutArgs(&message_loop_, &base::MessageLoop::QuitNow));
- pipeline_->Seek(seek_time,
- base::Bind(&PipelineIntegrationTestBase::SaveStatus,
- base::Unretained(this)));
- message_loop_.Run();
- return (pipeline_status_ == PIPELINE_OK);
-}
-
-void PipelineIntegrationTestBase::Stop() {
- DCHECK(pipeline_->IsRunning());
- pipeline_->Stop(base::MessageLoop::QuitClosure());
- message_loop_.Run();
-}
-
-void PipelineIntegrationTestBase::QuitAfterCurrentTimeTask(
- const base::TimeDelta& quit_time) {
- if (pipeline_->GetMediaTime() >= quit_time ||
- pipeline_status_ != PIPELINE_OK) {
- message_loop_.Quit();
- return;
- }
-
- message_loop_.PostDelayedTask(
- FROM_HERE,
- base::Bind(&PipelineIntegrationTestBase::QuitAfterCurrentTimeTask,
- base::Unretained(this), quit_time),
- base::TimeDelta::FromMilliseconds(10));
-}
-
-bool PipelineIntegrationTestBase::WaitUntilCurrentTimeIsAfter(
- const base::TimeDelta& wait_time) {
- DCHECK(pipeline_->IsRunning());
- DCHECK_GT(pipeline_->GetPlaybackRate(), 0);
- DCHECK(wait_time <= pipeline_->GetMediaDuration());
-
- message_loop_.PostDelayedTask(
- FROM_HERE,
- base::Bind(&PipelineIntegrationTestBase::QuitAfterCurrentTimeTask,
- base::Unretained(this),
- wait_time),
- base::TimeDelta::FromMilliseconds(10));
- message_loop_.Run();
- return (pipeline_status_ == PIPELINE_OK);
-}
-
-void PipelineIntegrationTestBase::CreateDemuxer(
- const base::FilePath& file_path) {
- FileDataSource* file_data_source = new FileDataSource();
- CHECK(file_data_source->Initialize(file_path)) << "Is " << file_path.value()
- << " missing?";
- data_source_.reset(file_data_source);
-
- Demuxer::NeedKeyCB need_key_cb = base::Bind(
- &PipelineIntegrationTestBase::DemuxerNeedKeyCB, base::Unretained(this));
- demuxer_ =
- scoped_ptr<Demuxer>(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
- data_source_.get(),
- need_key_cb,
- new MediaLog()));
-}
-
-scoped_ptr<Renderer> PipelineIntegrationTestBase::CreateRenderer(
- Decryptor* decryptor) {
- ScopedVector<VideoDecoder> video_decoders;
-#if !defined(MEDIA_DISABLE_LIBVPX)
- video_decoders.push_back(
- new VpxVideoDecoder(message_loop_.message_loop_proxy()));
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
- video_decoders.push_back(
- new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
-
- // Disable frame dropping if hashing is enabled.
- scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
- message_loop_.message_loop_proxy(),
- video_decoders.Pass(),
- base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
- base::Unretained(this),
- decryptor),
- base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
- base::Unretained(this)),
- false,
- new MediaLog()));
-
- if (!clockless_playback_) {
- audio_sink_ = new NullAudioSink(message_loop_.message_loop_proxy());
- } else {
- clockless_audio_sink_ = new ClocklessAudioSink();
- }
-
- ScopedVector<AudioDecoder> audio_decoders;
- audio_decoders.push_back(
- new FFmpegAudioDecoder(message_loop_.message_loop_proxy(), LogCB()));
- audio_decoders.push_back(
- new OpusAudioDecoder(message_loop_.message_loop_proxy()));
-
- AudioParameters out_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO,
- 44100,
- 16,
- 512);
- hardware_config_.UpdateOutputConfig(out_params);
-
- scoped_ptr<AudioRenderer> audio_renderer(new AudioRendererImpl(
- message_loop_.message_loop_proxy(),
- (clockless_playback_)
- ? static_cast<AudioRendererSink*>(clockless_audio_sink_.get())
- : audio_sink_.get(),
- audio_decoders.Pass(),
- base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
- base::Unretained(this),
- decryptor),
- hardware_config_,
- new MediaLog()));
- if (hashing_enabled_)
- audio_sink_->StartAudioHashForTesting();
-
- scoped_ptr<RendererImpl> renderer_impl(
- new RendererImpl(message_loop_.message_loop_proxy(),
- audio_renderer.Pass(),
- video_renderer.Pass()));
-
- // Prevent non-deterministic buffering state callbacks from firing (e.g., slow
- // machine, valgrind).
- renderer_impl->DisableUnderflowForTesting();
-
- if (clockless_playback_)
- renderer_impl->EnableClocklessVideoPlaybackForTesting();
-
- return renderer_impl.Pass();
-}
-
-void PipelineIntegrationTestBase::SetDecryptor(
- Decryptor* decryptor,
- const DecryptorReadyCB& decryptor_ready_cb) {
- decryptor_ready_cb.Run(
- decryptor,
- base::Bind(&PipelineIntegrationTestBase::DecryptorAttached,
- base::Unretained(this)));
- EXPECT_CALL(*this, DecryptorAttached(true));
-}
-
-void PipelineIntegrationTestBase::OnVideoRendererPaint(
- const scoped_refptr<VideoFrame>& frame) {
- last_video_frame_format_ = frame->format();
- if (!hashing_enabled_)
- return;
- frame->HashFrameForTesting(&md5_context_);
-}
-
-std::string PipelineIntegrationTestBase::GetVideoHash() {
- DCHECK(hashing_enabled_);
- base::MD5Digest digest;
- base::MD5Final(&digest, &md5_context_);
- return base::MD5DigestToBase16(digest);
-}
-
-std::string PipelineIntegrationTestBase::GetAudioHash() {
- DCHECK(hashing_enabled_);
- return audio_sink_->GetAudioHashForTesting();
-}
-
-base::TimeDelta PipelineIntegrationTestBase::GetAudioTime() {
- DCHECK(clockless_playback_);
- return clockless_audio_sink_->render_time();
-}
-
-base::TimeTicks DummyTickClock::NowTicks() {
- now_ += base::TimeDelta::FromSeconds(60);
- return now_;
-}
-
-} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test_base.h b/chromium/media/filters/pipeline_integration_test_base.h
deleted file mode 100644
index a0f25c3196c..00000000000
--- a/chromium/media/filters/pipeline_integration_test_base.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_PIPELINE_INTEGRATION_TEST_BASE_H_
-#define MEDIA_FILTERS_PIPELINE_INTEGRATION_TEST_BASE_H_
-
-#include "base/md5.h"
-#include "base/message_loop/message_loop.h"
-#include "media/audio/clockless_audio_sink.h"
-#include "media/audio/null_audio_sink.h"
-#include "media/base/audio_hardware_config.h"
-#include "media/base/demuxer.h"
-#include "media/base/media_keys.h"
-#include "media/base/pipeline.h"
-#include "media/base/text_track.h"
-#include "media/base/text_track_config.h"
-#include "media/base/video_frame.h"
-#include "media/filters/video_renderer_impl.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace base {
-class FilePath;
-}
-
-namespace media {
-
-class Decryptor;
-
-// Empty MD5 hash string. Used to verify empty video tracks.
-extern const char kNullVideoHash[];
-
-// Empty hash string. Used to verify empty audio tracks.
-extern const char kNullAudioHash[];
-
-// Dummy tick clock which advances extremely quickly (1 minute every time
-// NowTicks() is called).
-class DummyTickClock : public base::TickClock {
- public:
- DummyTickClock() : now_() {}
- ~DummyTickClock() override {}
- base::TimeTicks NowTicks() override;
-
- private:
- base::TimeTicks now_;
-};
-
-// Integration tests for Pipeline. Real demuxers, real decoders, and
-// base renderer implementations are used to verify pipeline functionality. The
-// renderers used in these tests rely heavily on the AudioRendererBase &
-// VideoRendererImpl implementations which contain a majority of the code used
-// in the real AudioRendererImpl & SkCanvasVideoRenderer implementations used in
-// the browser. The renderers in this test don't actually write data to a
-// display or audio device. Both of these devices are simulated since they have
-// little effect on verifying pipeline behavior and allow tests to run faster
-// than real-time.
-class PipelineIntegrationTestBase {
- public:
- PipelineIntegrationTestBase();
- virtual ~PipelineIntegrationTestBase();
-
- bool WaitUntilOnEnded();
- PipelineStatus WaitUntilEndedOrError();
- bool Start(const base::FilePath& file_path, PipelineStatus expected_status);
- // Enable playback with audio and video hashing enabled, or clockless
- // playback (audio only). Frame dropping and audio underflow will be disabled
- // if hashing enabled to ensure consistent hashes.
- enum kTestType { kHashed, kClockless };
- bool Start(const base::FilePath& file_path,
- PipelineStatus expected_status,
- kTestType test_type);
- // Initialize the pipeline and ignore any status updates. Useful for testing
- // invalid audio/video clips which don't have deterministic results.
- bool Start(const base::FilePath& file_path);
- bool Start(const base::FilePath& file_path, Decryptor* decryptor);
-
- void Play();
- void Pause();
- bool Seek(base::TimeDelta seek_time);
- void Stop();
- bool WaitUntilCurrentTimeIsAfter(const base::TimeDelta& wait_time);
-
- // Returns the MD5 hash of all video frames seen. Should only be called once
- // after playback completes. First time hashes should be generated with
- // --video-threads=1 to ensure correctness. Pipeline must have been started
- // with hashing enabled.
- std::string GetVideoHash();
-
- // Returns the hash of all audio frames seen. Should only be called once
- // after playback completes. Pipeline must have been started with hashing
- // enabled.
- std::string GetAudioHash();
-
- // Returns the time taken to render the complete audio file.
- // Pipeline must have been started with clockless playback enabled.
- base::TimeDelta GetAudioTime();
-
- protected:
- base::MessageLoop message_loop_;
- base::MD5Context md5_context_;
- bool hashing_enabled_;
- bool clockless_playback_;
- scoped_ptr<Demuxer> demuxer_;
- scoped_ptr<DataSource> data_source_;
- scoped_ptr<Pipeline> pipeline_;
- scoped_refptr<NullAudioSink> audio_sink_;
- scoped_refptr<ClocklessAudioSink> clockless_audio_sink_;
- bool ended_;
- PipelineStatus pipeline_status_;
- Demuxer::NeedKeyCB need_key_cb_;
- VideoFrame::Format last_video_frame_format_;
- DummyTickClock dummy_clock_;
- AudioHardwareConfig hardware_config_;
- PipelineMetadata metadata_;
-
- void SaveStatus(PipelineStatus status);
- void OnStatusCallbackChecked(PipelineStatus expected_status,
- PipelineStatus status);
- void OnStatusCallback(PipelineStatus status);
- PipelineStatusCB QuitOnStatusCB(PipelineStatus expected_status);
- void DemuxerNeedKeyCB(const std::string& type,
- const std::vector<uint8>& init_data);
- void set_need_key_cb(const Demuxer::NeedKeyCB& need_key_cb) {
- need_key_cb_ = need_key_cb;
- }
-
- void OnEnded();
- void OnError(PipelineStatus status);
- void QuitAfterCurrentTimeTask(const base::TimeDelta& quit_time);
-
- // Creates Demuxer and sets |demuxer_|.
- void CreateDemuxer(const base::FilePath& file_path);
-
- // Creates and returns a Renderer.
- scoped_ptr<Renderer> CreateRenderer(Decryptor* decryptor);
-
- void SetDecryptor(Decryptor* decryptor,
- const DecryptorReadyCB& decryptor_ready_cb);
- void OnVideoRendererPaint(const scoped_refptr<VideoFrame>& frame);
-
- MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
- MOCK_METHOD1(OnBufferingStateChanged, void(BufferingState));
- MOCK_METHOD1(DecryptorAttached, void(bool));
- MOCK_METHOD2(OnAddTextTrack,
- void(const TextTrackConfig& config,
- const AddTextTrackDoneCB& done_cb));
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_PIPELINE_INTEGRATION_TEST_BASE_H_
diff --git a/chromium/media/filters/source_buffer_platform_lowmem.cc b/chromium/media/filters/source_buffer_platform_lowmem.cc
new file mode 100644
index 00000000000..79757cd81c1
--- /dev/null
+++ b/chromium/media/filters/source_buffer_platform_lowmem.cc
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/source_buffer_platform.h"
+
+namespace media {
+
+// 2MB: approximately 1 minute of 256Kbps content.
+// 30MB: approximately 1 minute of 4Mbps content.
+const int kSourceBufferAudioMemoryLimit = 2 * 1024 * 1024;
+const int kSourceBufferVideoMemoryLimit = 30 * 1024 * 1024;
+
+} // namespace media
diff --git a/chromium/media/filters/source_buffer_range.cc b/chromium/media/filters/source_buffer_range.cc
index c0f3c78d0d3..4fad27dbcef 100644
--- a/chromium/media/filters/source_buffer_range.cc
+++ b/chromium/media/filters/source_buffer_range.cc
@@ -36,7 +36,7 @@ SourceBufferRange::SourceBufferRange(
interbuffer_distance_cb_(interbuffer_distance_cb),
size_in_bytes_(0) {
CHECK(!new_buffers.empty());
- DCHECK(new_buffers.front()->IsKeyframe());
+ DCHECK(new_buffers.front()->is_key_frame());
DCHECK(!interbuffer_distance_cb.is_null());
AppendBuffersToEnd(new_buffers);
}
@@ -48,6 +48,9 @@ void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
DCHECK(media_segment_start_time_ == kNoDecodeTimestamp() ||
media_segment_start_time_ <=
new_buffers.front()->GetDecodeTimestamp());
+
+ AdjustEstimatedDurationForNewAppend(new_buffers);
+
for (BufferQueue::const_iterator itr = new_buffers.begin();
itr != new_buffers.end();
++itr) {
@@ -55,7 +58,7 @@ void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
buffers_.push_back(*itr);
size_in_bytes_ += (*itr)->data_size();
- if ((*itr)->IsKeyframe()) {
+ if ((*itr)->is_key_frame()) {
keyframe_map_.insert(
std::make_pair((*itr)->GetDecodeTimestamp(),
buffers_.size() - 1 + keyframe_map_index_base_));
@@ -63,11 +66,35 @@ void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
}
}
+void SourceBufferRange::AdjustEstimatedDurationForNewAppend(
+ const BufferQueue& new_buffers) {
+ if (buffers_.empty() || new_buffers.empty()) {
+ return;
+ }
+
+ // If the last of the previously appended buffers contains estimated duration,
+ // we now refine that estimate by taking the PTS delta from the first new
+ // buffer being appended.
+ const auto& last_appended_buffer = buffers_.back();
+ if (last_appended_buffer->is_duration_estimated()) {
+ base::TimeDelta timestamp_delta =
+ new_buffers.front()->timestamp() - last_appended_buffer->timestamp();
+ DCHECK(timestamp_delta > base::TimeDelta());
+ if (last_appended_buffer->duration() != timestamp_delta) {
+ DVLOG(1) << "Replacing estimated duration ("
+ << last_appended_buffer->duration()
+ << ") from previous range-end with derived duration ("
+ << timestamp_delta << ").";
+ last_appended_buffer->set_duration(timestamp_delta);
+ }
+ }
+}
+
void SourceBufferRange::Seek(DecodeTimestamp timestamp) {
DCHECK(CanSeekTo(timestamp));
DCHECK(!keyframe_map_.empty());
- KeyframeMap::iterator result = GetFirstKeyframeBefore(timestamp);
+ KeyframeMap::iterator result = GetFirstKeyframeAtOrBefore(timestamp);
next_buffer_index_ = result->second - keyframe_map_index_base_;
DCHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()));
}
@@ -102,14 +129,12 @@ void SourceBufferRange::SeekToStart() {
next_buffer_index_ = 0;
}
-SourceBufferRange* SourceBufferRange::SplitRange(
- DecodeTimestamp timestamp, bool is_exclusive) {
+SourceBufferRange* SourceBufferRange::SplitRange(DecodeTimestamp timestamp) {
CHECK(!buffers_.empty());
- // Find the first keyframe after |timestamp|. If |is_exclusive|, do not
- // include keyframes at |timestamp|.
+ // Find the first keyframe at or after |timestamp|.
KeyframeMap::iterator new_beginning_keyframe =
- GetFirstKeyframeAt(timestamp, is_exclusive);
+ GetFirstKeyframeAt(timestamp, false);
// If there is no keyframe after |timestamp|, we can't split the range.
if (new_beginning_keyframe == keyframe_map_.end())
@@ -175,7 +200,7 @@ SourceBufferRange::GetFirstKeyframeAt(DecodeTimestamp timestamp,
}
SourceBufferRange::KeyframeMap::iterator
-SourceBufferRange::GetFirstKeyframeBefore(DecodeTimestamp timestamp) {
+SourceBufferRange::GetFirstKeyframeAtOrBefore(DecodeTimestamp timestamp) {
KeyframeMap::iterator result = keyframe_map_.lower_bound(timestamp);
// lower_bound() returns the first element >= |timestamp|, so we want the
// previous element if it did not return the element exactly equal to
@@ -288,7 +313,7 @@ int SourceBufferRange::GetRemovalGOP(
BufferQueue::iterator buffer_itr = buffers_.begin() + keyframe_index;
KeyframeMap::iterator gop_end = keyframe_map_.end();
if (end_timestamp < GetBufferedEndTimestamp())
- gop_end = GetFirstKeyframeBefore(end_timestamp);
+ gop_end = GetFirstKeyframeAtOrBefore(end_timestamp);
// Check if the removal range is within a GOP and skip the loop if so.
// [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
@@ -449,7 +474,7 @@ bool SourceBufferRange::CanAppendBuffersToEnd(
const BufferQueue& buffers) const {
DCHECK(!buffers_.empty());
return IsNextInSequence(buffers.front()->GetDecodeTimestamp(),
- buffers.front()->IsKeyframe());
+ buffers.front()->is_key_frame());
}
bool SourceBufferRange::BelongsToRange(DecodeTimestamp timestamp) const {
@@ -528,11 +553,11 @@ DecodeTimestamp SourceBufferRange::KeyframeBeforeTimestamp(
if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
return kNoDecodeTimestamp();
- return GetFirstKeyframeBefore(timestamp)->first;
+ return GetFirstKeyframeAtOrBefore(timestamp)->first;
}
bool SourceBufferRange::IsNextInSequence(
- DecodeTimestamp timestamp, bool is_keyframe) const {
+ DecodeTimestamp timestamp, bool is_key_frame) const {
DecodeTimestamp end = buffers_.back()->GetDecodeTimestamp();
if (end < timestamp &&
(gap_policy_ == ALLOW_GAPS ||
@@ -541,7 +566,7 @@ bool SourceBufferRange::IsNextInSequence(
}
return timestamp == end && AllowSameTimestamp(
- buffers_.back()->IsKeyframe(), is_keyframe);
+ buffers_.back()->is_key_frame(), is_key_frame);
}
base::TimeDelta SourceBufferRange::GetFudgeRoom() const {
diff --git a/chromium/media/filters/source_buffer_range.h b/chromium/media/filters/source_buffer_range.h
index 1961e340b42..0c6a8b36f34 100644
--- a/chromium/media/filters/source_buffer_range.h
+++ b/chromium/media/filters/source_buffer_range.h
@@ -87,13 +87,12 @@ class SourceBufferRange {
// Seeks to the beginning of the range.
void SeekToStart();
- // Finds the next keyframe from |buffers_| after |timestamp| (or at
- // |timestamp| if |is_exclusive| is false) and creates and returns a new
- // SourceBufferRange with the buffers from that keyframe onward.
- // The buffers in the new SourceBufferRange are moved out of this range. If
- // there is no keyframe after |timestamp|, SplitRange() returns null and this
- // range is unmodified.
- SourceBufferRange* SplitRange(DecodeTimestamp timestamp, bool is_exclusive);
+ // Finds the next keyframe from |buffers_| starting at or after |timestamp|
+ // and creates and returns a new SourceBufferRange with the buffers from that
+ // keyframe onward. The buffers in the new SourceBufferRange are moved out of
+ // this range. If there is no keyframe at or after |timestamp|, SplitRange()
+ // returns null and this range is unmodified.
+ SourceBufferRange* SplitRange(DecodeTimestamp timestamp);
// Deletes the buffers from this range starting at |timestamp|, exclusive if
// |is_exclusive| is true, inclusive otherwise.
@@ -195,7 +194,7 @@ class SourceBufferRange {
// Returns true if |timestamp| is the timestamp of the next buffer in
// sequence after |buffers_.back()|, false otherwise.
- bool IsNextInSequence(DecodeTimestamp timestamp, bool is_keyframe) const;
+ bool IsNextInSequence(DecodeTimestamp timestamp, bool is_key_frame) const;
// Adds all buffers which overlap [start, end) to the end of |buffers|. If
// no buffers exist in the range returns false, true otherwise.
@@ -207,6 +206,12 @@ class SourceBufferRange {
private:
typedef std::map<DecodeTimestamp, int> KeyframeMap;
+ // Called during AppendBuffersToEnd to adjust estimated duration at the
+ // end of the last append to match the delta in timestamps between
+ // the last append and the upcoming append. This is a workaround for
+ // WebM media where a duration is not always specified.
+ void AdjustEstimatedDurationForNewAppend(const BufferQueue& new_buffers);
+
// Seeks the range to the next keyframe after |timestamp|. If
// |skip_given_timestamp| is true, the seek will go to a keyframe with a
// timestamp strictly greater than |timestamp|.
@@ -226,7 +231,7 @@ class SourceBufferRange {
// Returns an iterator in |keyframe_map_| pointing to the first keyframe
// before or at |timestamp|.
- KeyframeMap::iterator GetFirstKeyframeBefore(DecodeTimestamp timestamp);
+ KeyframeMap::iterator GetFirstKeyframeAtOrBefore(DecodeTimestamp timestamp);
// Helper method to delete buffers in |buffers_| starting at
// |starting_point|, an iterator in |buffers_|.
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index 5a2de0b49b8..18a08b7501f 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -6,10 +6,11 @@
#include <algorithm>
#include <map>
+#include <sstream>
#include "base/bind.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/audio_splicer.h"
#include "media/filters/source_buffer_platform.h"
#include "media/filters/source_buffer_range.h"
@@ -55,6 +56,29 @@ static base::TimeDelta kSeekToStartFudgeRoom() {
return base::TimeDelta::FromMilliseconds(1000);
}
+// Helper method for logging, converts a range into a readable string.
+static std::string RangeToString(const SourceBufferRange& range) {
+ std::stringstream ss;
+ ss << "[" << range.GetStartTimestamp().InSecondsF()
+ << ";" << range.GetEndTimestamp().InSecondsF()
+ << "(" << range.GetBufferedEndTimestamp().InSecondsF() << ")]";
+ return ss.str();
+}
+
+// Helper method for logging, converts a set of ranges into a readable string.
+static std::string RangesToString(const SourceBufferStream::RangeList& ranges) {
+ if (ranges.empty())
+ return "<EMPTY>";
+
+ std::stringstream ss;
+ for (const auto* range_ptr : ranges) {
+ if (range_ptr != ranges.front())
+ ss << " ";
+ ss << RangeToString(*range_ptr);
+ }
+ return ss.str();
+}
+
static SourceBufferRange::GapPolicy TypeToGapPolicy(
SourceBufferStream::Type type) {
switch (type) {
@@ -154,8 +178,8 @@ SourceBufferStream::~SourceBufferStream() {
void SourceBufferStream::OnNewMediaSegment(
DecodeTimestamp media_segment_start_time) {
- DVLOG(1) << __FUNCTION__ << "(" << media_segment_start_time.InSecondsF()
- << ")";
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " (" << media_segment_start_time.InSecondsF() << ")";
DCHECK(!end_of_stream_);
media_segment_start_time_ = media_segment_start_time;
new_media_segment_ = true;
@@ -188,9 +212,17 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
DCHECK(media_segment_start_time_ <= buffers.front()->GetDecodeTimestamp());
DCHECK(!end_of_stream_);
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName() << ": buffers dts=["
+ << buffers.front()->GetDecodeTimestamp().InSecondsF() << ";"
+ << buffers.back()->GetDecodeTimestamp().InSecondsF() << "] pts=["
+ << buffers.front()->timestamp().InSecondsF() << ";"
+ << buffers.back()->timestamp().InSecondsF() << "(last frame dur="
+ << buffers.back()->duration().InSecondsF() << ")]";
+
// New media segments must begin with a keyframe.
- if (new_media_segment_ && !buffers.front()->IsKeyframe()) {
- MEDIA_LOG(log_cb_) << "Media segment did not begin with keyframe.";
+ // TODO(wolenetz): Relax this requirement. See http://crbug.com/229412.
+ if (new_media_segment_ && !buffers.front()->is_key_frame()) {
+ MEDIA_LOG(ERROR, log_cb_) << "Media segment did not begin with key frame.";
return false;
}
@@ -200,15 +232,16 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
if (media_segment_start_time_ < DecodeTimestamp() ||
buffers.front()->GetDecodeTimestamp() < DecodeTimestamp()) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< "Cannot append a media segment with negative timestamps.";
return false;
}
if (!IsNextTimestampValid(buffers.front()->GetDecodeTimestamp(),
- buffers.front()->IsKeyframe())) {
- MEDIA_LOG(log_cb_) << "Invalid same timestamp construct detected at time "
- << buffers.front()->GetDecodeTimestamp().InSecondsF();
+ buffers.front()->is_key_frame())) {
+ const DecodeTimestamp& dts = buffers.front()->GetDecodeTimestamp();
+ MEDIA_LOG(ERROR, log_cb_) << "Invalid same timestamp construct detected at"
+ << " time " << dts.InSecondsF();
return false;
}
@@ -227,7 +260,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
if (range_for_next_append_ != ranges_.end()) {
(*range_for_next_append_)->AppendBuffersToEnd(buffers);
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
- last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
+ last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
} else {
DecodeTimestamp new_range_start_time = std::min(
media_segment_start_time_, buffers.front()->GetDecodeTimestamp());
@@ -235,25 +268,31 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
BufferQueue trimmed_buffers;
// If the new range is not being created because of a new media
- // segment, then we must make sure that we start with a keyframe.
+ // segment, then we must make sure that we start with a key frame.
// This can happen if the GOP in the previous append gets destroyed
// by a Remove() call.
if (!new_media_segment_) {
BufferQueue::const_iterator itr = buffers.begin();
- // Scan past all the non-keyframes.
- while (itr != buffers.end() && !(*itr)->IsKeyframe()) {
+ // Scan past all the non-key-frames.
+ while (itr != buffers.end() && !(*itr)->is_key_frame()) {
++itr;
}
- // If we didn't find a keyframe, then update the last appended
+ // If we didn't find a key frame, then update the last appended
// buffer state and return.
if (itr == buffers.end()) {
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
- last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
+ last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": new buffers in the middle of media segment depend on "
+ "keyframe that has been removed, and contain no keyframes. "
+ "Skipping further processing.";
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": done. ranges_=" << RangesToString(ranges_);
return true;
} else if (itr != buffers.begin()) {
- // Copy the first keyframe and everything after it into
+ // Copy the first key frame and everything after it into
// |trimmed_buffers|.
trimmed_buffers.assign(itr, buffers.end());
buffers_for_new_range = &trimmed_buffers;
@@ -272,7 +311,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
last_appended_buffer_timestamp_ =
buffers_for_new_range->back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ =
- buffers_for_new_range->back()->IsKeyframe();
+ buffers_for_new_range->back()->is_key_frame();
}
new_media_segment_ = false;
@@ -298,6 +337,11 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
track_buffer_.insert(track_buffer_.end(), deleted_buffers.begin(),
deleted_buffers.end());
+ DVLOG(3) << __FUNCTION__ << " Added " << deleted_buffers.size()
+ << " deleted buffers to track buffer. TB size is now "
+ << track_buffer_.size();
+ } else {
+ DVLOG(3) << __FUNCTION__ << " No deleted buffers for track buffer";
}
// Prune any extra buffers in |track_buffer_| if new keyframes
@@ -313,6 +357,8 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
GarbageCollectIfNeeded();
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": done. ranges_=" << RangesToString(ranges_);
DCHECK(IsRangeListSorted(ranges_));
DCHECK(OnlySelectedRangeIsSeeked());
return true;
@@ -320,8 +366,8 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration) {
- DVLOG(1) << __FUNCTION__ << "(" << start.InSecondsF()
- << ", " << end.InSecondsF()
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " (" << start.InSecondsF() << ", " << end.InSecondsF()
<< ", " << duration.InSecondsF() << ")";
DCHECK(start >= base::TimeDelta()) << start.InSecondsF();
DCHECK(start < end) << "start " << start.InSecondsF()
@@ -346,12 +392,15 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
SetSelectedRangeIfNeeded(deleted_buffers.front()->GetDecodeTimestamp());
}
-void SourceBufferStream::RemoveInternal(
- DecodeTimestamp start, DecodeTimestamp end, bool is_exclusive,
- BufferQueue* deleted_buffers) {
- DVLOG(1) << __FUNCTION__ << "(" << start.InSecondsF()
- << ", " << end.InSecondsF()
- << ", " << is_exclusive << ")";
+void SourceBufferStream::RemoveInternal(DecodeTimestamp start,
+ DecodeTimestamp end,
+ bool exclude_start,
+ BufferQueue* deleted_buffers) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName() << " ("
+ << start.InSecondsF() << ", " << end.InSecondsF() << ", "
+ << exclude_start << ")";
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": before remove ranges_=" << RangesToString(ranges_);
DCHECK(start >= DecodeTimestamp());
DCHECK(start < end) << "start " << start.InSecondsF()
@@ -365,8 +414,9 @@ void SourceBufferStream::RemoveInternal(
if (range->GetStartTimestamp() >= end)
break;
- // Split off any remaining end piece and add it to |ranges_|.
- SourceBufferRange* new_range = range->SplitRange(end, is_exclusive);
+ // Split off any remaining GOPs starting at or after |end| and add it to
+ // |ranges_|.
+ SourceBufferRange* new_range = range->SplitRange(end);
if (new_range) {
itr = ranges_.insert(++itr, new_range);
--itr;
@@ -380,7 +430,7 @@ void SourceBufferStream::RemoveInternal(
// Truncate the current range so that it only contains data before
// the removal range.
BufferQueue saved_buffers;
- bool delete_range = range->TruncateAt(start, &saved_buffers, is_exclusive);
+ bool delete_range = range->TruncateAt(start, &saved_buffers, exclude_start);
// Check to see if the current playback position was removed and
// update the selected range appropriately.
@@ -423,9 +473,11 @@ void SourceBufferStream::RemoveInternal(
++itr;
}
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": after remove ranges_=" << RangesToString(ranges_);
+
DCHECK(IsRangeListSorted(ranges_));
DCHECK(OnlySelectedRangeIsSeeked());
- DVLOG(1) << __FUNCTION__ << " : done";
}
void SourceBufferStream::ResetSeekState() {
@@ -456,7 +508,7 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
for (BufferQueue::const_iterator itr = buffers.begin();
itr != buffers.end(); ++itr) {
DecodeTimestamp current_timestamp = (*itr)->GetDecodeTimestamp();
- bool current_is_keyframe = (*itr)->IsKeyframe();
+ bool current_is_keyframe = (*itr)->is_key_frame();
DCHECK(current_timestamp != kNoDecodeTimestamp());
DCHECK((*itr)->duration() >= base::TimeDelta())
<< "Packet with invalid duration."
@@ -466,16 +518,16 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
if (prev_timestamp != kNoDecodeTimestamp()) {
if (current_timestamp < prev_timestamp) {
- MEDIA_LOG(log_cb_) << "Buffers were not monotonically increasing.";
+ MEDIA_LOG(ERROR, log_cb_) << "Buffers did not monotonically increase.";
return false;
}
if (current_timestamp == prev_timestamp &&
!SourceBufferRange::AllowSameTimestamp(prev_is_keyframe,
current_is_keyframe)) {
- MEDIA_LOG(log_cb_) << "Unexpected combination of buffers with the"
- << " same timestamp detected at "
- << current_timestamp.InSecondsF();
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected combination of buffers with"
+ << " the same timestamp detected at "
+ << current_timestamp.InSecondsF();
return false;
}
}
@@ -552,6 +604,11 @@ void SourceBufferStream::GarbageCollectIfNeeded() {
int bytes_to_free = ranges_size - memory_limit_;
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName() << ": Before GC"
+ << " ranges_size=" << ranges_size
+ << " ranges_=" << RangesToString(ranges_)
+ << " memory_limit_=" << memory_limit_;
+
// Begin deleting after the last appended buffer.
int bytes_freed = FreeBuffersAfterLastAppended(bytes_to_free);
@@ -561,7 +618,11 @@ void SourceBufferStream::GarbageCollectIfNeeded() {
// Begin deleting from the back.
if (bytes_to_free - bytes_freed > 0)
- FreeBuffers(bytes_to_free - bytes_freed, true);
+ bytes_freed += FreeBuffers(bytes_to_free - bytes_freed, true);
+
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName() << ": After GC"
+ << " bytes_freed=" << bytes_freed
+ << " ranges_=" << RangesToString(ranges_);
}
int SourceBufferStream::FreeBuffersAfterLastAppended(int total_bytes_to_free) {
@@ -740,7 +801,7 @@ void SourceBufferStream::PrepareRangesForNextAppend(
DecodeTimestamp prev_timestamp = last_appended_buffer_timestamp_;
bool prev_is_keyframe = last_appended_buffer_is_keyframe_;
DecodeTimestamp next_timestamp = new_buffers.front()->GetDecodeTimestamp();
- bool next_is_keyframe = new_buffers.front()->IsKeyframe();
+ bool next_is_keyframe = new_buffers.front()->is_key_frame();
if (prev_timestamp != kNoDecodeTimestamp() &&
prev_timestamp != next_timestamp) {
@@ -758,7 +819,7 @@ void SourceBufferStream::PrepareRangesForNextAppend(
// because we don't generate splice frames for same timestamp situations.
DCHECK(new_buffers.front()->splice_timestamp() !=
new_buffers.front()->timestamp());
- const bool is_exclusive =
+ const bool exclude_start =
new_buffers.front()->splice_buffers().empty() &&
prev_timestamp == next_timestamp &&
SourceBufferRange::AllowSameTimestamp(prev_is_keyframe, next_is_keyframe);
@@ -768,16 +829,19 @@ void SourceBufferStream::PrepareRangesForNextAppend(
DecodeTimestamp end = new_buffers.back()->GetDecodeTimestamp();
base::TimeDelta duration = new_buffers.back()->duration();
- if (duration != kNoTimestamp() && duration > base::TimeDelta()) {
+ // Set end time for remove to include the duration of last buffer. If the
+ // duration is estimated, use 1 microsecond instead to ensure frames are not
+ // accidentally removed due to over-estimation.
+ if (duration != kNoTimestamp() && duration > base::TimeDelta() &&
+ !new_buffers.back()->is_duration_estimated()) {
end += duration;
} else {
- // TODO(acolwell): Ensure all buffers actually have proper
- // duration info so that this hack isn't needed.
+ // TODO(chcunningham): Emit warning when 0ms durations are not expected.
// http://crbug.com/312836
end += base::TimeDelta::FromInternalValue(1);
}
- RemoveInternal(start, end, is_exclusive, deleted_buffers);
+ RemoveInternal(start, end, exclude_start, deleted_buffers);
// Restore the range seek state if necessary.
if (temporarily_select_range)
@@ -816,6 +880,9 @@ void SourceBufferStream::MergeWithAdjacentRangeIfNecessary(
}
bool transfer_current_position = selected_range_ == *next_range_itr;
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " merging " << RangeToString(*range_with_new_buffers)
+ << " into " << RangeToString(**next_range_itr);
range_with_new_buffers->AppendRangeToEnd(**next_range_itr,
transfer_current_position);
// Update |selected_range_| pointer if |range| has become selected after
@@ -831,6 +898,8 @@ void SourceBufferStream::MergeWithAdjacentRangeIfNecessary(
void SourceBufferStream::Seek(base::TimeDelta timestamp) {
DCHECK(timestamp >= base::TimeDelta());
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " (" << timestamp.InSecondsF() << ")";
ResetSeekState();
if (ShouldSeekToStartOfBuffered(timestamp)) {
@@ -859,12 +928,14 @@ void SourceBufferStream::Seek(base::TimeDelta timestamp) {
}
bool SourceBufferStream::IsSeekPending() const {
- return !(end_of_stream_ && IsEndSelected()) && seek_pending_;
+ return seek_pending_ && !IsEndOfStreamReached();
}
void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
DecodeTimestamp duration_dts =
DecodeTimestamp::FromPresentationTime(duration);
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " (" << duration.InSecondsF() << ")";
RangeList::iterator itr = ranges_.end();
for (itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
@@ -899,17 +970,34 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName();
if (!pending_buffer_.get()) {
const SourceBufferStream::Status status = GetNextBufferInternal(out_buffer);
- if (status != SourceBufferStream::kSuccess || !SetPendingBuffer(out_buffer))
+ if (status != SourceBufferStream::kSuccess ||
+ !SetPendingBuffer(out_buffer)) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": no pending buffer, returning status " << status;
return status;
+ }
}
- if (!pending_buffer_->splice_buffers().empty())
- return HandleNextBufferWithSplice(out_buffer);
+ if (!pending_buffer_->splice_buffers().empty()) {
+ const SourceBufferStream::Status status =
+ HandleNextBufferWithSplice(out_buffer);
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": handled next buffer with splice, returning status "
+ << status;
+ return status;
+ }
DCHECK(pending_buffer_->preroll_buffer().get());
- return HandleNextBufferWithPreroll(out_buffer);
+
+ const SourceBufferStream::Status status =
+ HandleNextBufferWithPreroll(out_buffer);
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": handled next buffer with preroll, returning status "
+ << status;
+ return status;
}
SourceBufferStream::Status SourceBufferStream::HandleNextBufferWithSplice(
@@ -997,6 +1085,7 @@ SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
return kConfigChange;
}
+ DVLOG(3) << __FUNCTION__ << " Next buffer coming from track_buffer_";
*out_buffer = next_buffer;
track_buffer_.pop_front();
last_output_buffer_timestamp_ = (*out_buffer)->GetDecodeTimestamp();
@@ -1009,9 +1098,15 @@ SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
return kSuccess;
}
+ DCHECK(track_buffer_.empty());
if (!selected_range_ || !selected_range_->HasNextBuffer()) {
- if (end_of_stream_ && IsEndSelected())
+ if (IsEndOfStreamReached()) {
return kEndOfStream;
+ }
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": returning kNeedBuffer "
+ << (selected_range_ ? "(selected range has no next buffer)"
+ : "(no selected range)");
return kNeedBuffer;
}
@@ -1077,7 +1172,8 @@ void SourceBufferStream::SeekAndSetSelectedRange(
}
void SourceBufferStream::SetSelectedRange(SourceBufferRange* range) {
- DVLOG(1) << __FUNCTION__ << " : " << selected_range_ << " -> " << range;
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": " << selected_range_ << " -> " << range;
if (selected_range_)
selected_range_->ResetNextBufferPosition();
DCHECK(!range || range->HasNextBufferPosition());
@@ -1111,7 +1207,10 @@ void SourceBufferStream::UnmarkEndOfStream() {
end_of_stream_ = false;
}
-bool SourceBufferStream::IsEndSelected() const {
+bool SourceBufferStream::IsEndOfStreamReached() const {
+ if (!end_of_stream_ || !track_buffer_.empty())
+ return false;
+
if (ranges_.empty())
return true;
@@ -1121,6 +1220,9 @@ bool SourceBufferStream::IsEndSelected() const {
return seek_buffer_timestamp_ >= last_range_end_time;
}
+ if (!selected_range_)
+ return true;
+
return selected_range_ == ranges_.back();
}
@@ -1152,12 +1254,12 @@ bool SourceBufferStream::UpdateAudioConfig(const AudioDecoderConfig& config) {
DVLOG(3) << "UpdateAudioConfig.";
if (audio_configs_[0].codec() != config.codec()) {
- MEDIA_LOG(log_cb_) << "Audio codec changes not allowed.";
+ MEDIA_LOG(ERROR, log_cb_) << "Audio codec changes not allowed.";
return false;
}
if (audio_configs_[0].is_encrypted() != config.is_encrypted()) {
- MEDIA_LOG(log_cb_) << "Audio encryption changes not allowed.";
+ MEDIA_LOG(ERROR, log_cb_) << "Audio encryption changes not allowed.";
return false;
}
@@ -1183,12 +1285,12 @@ bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config) {
DVLOG(3) << "UpdateVideoConfig.";
if (video_configs_[0].codec() != config.codec()) {
- MEDIA_LOG(log_cb_) << "Video codec changes not allowed.";
+ MEDIA_LOG(ERROR, log_cb_) << "Video codec changes not allowed.";
return false;
}
if (video_configs_[0].is_encrypted() != config.is_encrypted()) {
- MEDIA_LOG(log_cb_) << "Video encryption changes not allowed.";
+ MEDIA_LOG(ERROR, log_cb_) << "Video encryption changes not allowed.";
return false;
}
@@ -1228,7 +1330,8 @@ void SourceBufferStream::CompleteConfigChange() {
void SourceBufferStream::SetSelectedRangeIfNeeded(
const DecodeTimestamp timestamp) {
- DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << "(" << timestamp.InSecondsF() << ")";
if (selected_range_) {
DCHECK(track_buffer_.empty());
@@ -1245,8 +1348,11 @@ void SourceBufferStream::SetSelectedRangeIfNeeded(
// If the next buffer timestamp is not known then use a timestamp just after
// the timestamp on the last buffer returned by GetNextBuffer().
if (start_timestamp == kNoDecodeTimestamp()) {
- if (last_output_buffer_timestamp_ == kNoDecodeTimestamp())
+ if (last_output_buffer_timestamp_ == kNoDecodeTimestamp()) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " no previous output timestamp";
return;
+ }
start_timestamp = last_output_buffer_timestamp_ +
base::TimeDelta::FromInternalValue(1);
@@ -1256,8 +1362,11 @@ void SourceBufferStream::SetSelectedRangeIfNeeded(
FindNewSelectedRangeSeekTimestamp(start_timestamp);
// If we don't have buffered data to seek to, then return.
- if (seek_timestamp == kNoDecodeTimestamp())
- return;
+ if (seek_timestamp == kNoDecodeTimestamp()) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " couldn't find new selected range seek timestamp";
+ return;
+ }
DCHECK(track_buffer_.empty());
SeekAndSetSelectedRange(*FindExistingRangeFor(seek_timestamp),
@@ -1277,8 +1386,11 @@ DecodeTimestamp SourceBufferStream::FindNewSelectedRangeSeekTimestamp(
}
}
- if (itr == ranges_.end())
+ if (itr == ranges_.end()) {
+ DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " no buffered data for dts=" << start_timestamp.InSecondsF();
return kNoDecodeTimestamp();
+ }
// First check for a keyframe timestamp >= |start_timestamp|
// in the current range.
@@ -1441,14 +1553,16 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
}
}
- // Don't generate splice frames which represent less than two frames, since we
- // need at least that much to generate a crossfade. Per the spec, make this
- // check using the sample rate of the overlapping buffers.
+ // Don't generate splice frames which represent less than a millisecond (which
+ // is frequently the extent of timestamp resolution for poorly encoded media)
+ // or less than two frames (need at least two to crossfade).
const base::TimeDelta splice_duration =
pre_splice_buffers.back()->timestamp() +
pre_splice_buffers.back()->duration() - splice_timestamp;
- const base::TimeDelta minimum_splice_duration = base::TimeDelta::FromSecondsD(
- 2.0 / audio_configs_[append_config_index_].samples_per_second());
+ const base::TimeDelta minimum_splice_duration = std::max(
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSecondsD(
+ 2.0 / audio_configs_[append_config_index_].samples_per_second()));
if (splice_duration < minimum_splice_duration) {
DVLOG(1) << "Can't generate splice: not enough samples for crossfade; have "
<< splice_duration.InMicroseconds() << " us, but need "
@@ -1456,6 +1570,9 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
return;
}
+ DVLOG(1) << "Generating splice frame @ " << new_buffers.front()->timestamp()
+ << ", splice duration: " << splice_duration.InMicroseconds()
+ << " us";
new_buffers.front()->ConvertToSpliceBuffer(pre_splice_buffers);
}
diff --git a/chromium/media/filters/source_buffer_stream.h b/chromium/media/filters/source_buffer_stream.h
index b5bb855c1fa..0e70ff79c9d 100644
--- a/chromium/media/filters/source_buffer_stream.h
+++ b/chromium/media/filters/source_buffer_stream.h
@@ -33,6 +33,7 @@ class SourceBufferRange;
class MEDIA_EXPORT SourceBufferStream {
public:
typedef StreamParser::BufferQueue BufferQueue;
+ typedef std::list<SourceBufferRange*> RangeList;
// Status returned by GetNextBuffer().
// kSuccess: Indicates that the next buffer was returned.
@@ -148,8 +149,6 @@ class MEDIA_EXPORT SourceBufferStream {
private:
friend class SourceBufferStreamTest;
- typedef std::list<SourceBufferRange*> RangeList;
-
// Frees up space if the SourceBufferStream is taking up too much memory.
void GarbageCollectIfNeeded();
@@ -276,9 +275,12 @@ class MEDIA_EXPORT SourceBufferStream {
// stream, and "TEXT" for a text stream.
std::string GetStreamTypeName() const;
- // Returns true if we don't have any ranges or the last range is selected
- // or there is a pending seek beyond any existing ranges.
- bool IsEndSelected() const;
+ // Returns true if end of stream has been reached, i.e. the
+ // following conditions are met:
+ // 1. end of stream is marked and there is nothing in the track_buffer.
+ // 2. We don't have any ranges, or the last or no range is selected,
+ // or there is a pending seek beyond any existing ranges.
+ bool IsEndOfStreamReached() const;
// Deletes the range pointed to by |*itr| and removes it from |ranges_|.
// If |*itr| points to |selected_range_|, then |selected_range_| is set to
@@ -288,15 +290,16 @@ class MEDIA_EXPORT SourceBufferStream {
// Helper function used by Remove() and PrepareRangesForNextAppend() to
// remove buffers and ranges between |start| and |end|.
- // |is_exclusive| - If set to true, buffers with timestamps that
+ // |exclude_start| - If set to true, buffers with timestamps that
// match |start| are not removed. If set to false, buffers with
// timestamps that match |start| will be removed.
// |*deleted_buffers| - Filled with buffers for the current playback position
// if the removal range included the current playback position. These buffers
// can be used as candidates for placing in the |track_buffer_|.
- void RemoveInternal(
- DecodeTimestamp start, DecodeTimestamp end, bool is_exclusive,
- BufferQueue* deleted_buffers);
+ void RemoveInternal(DecodeTimestamp start,
+ DecodeTimestamp end,
+ bool exclude_start,
+ BufferQueue* deleted_buffers);
Type GetType() const;
@@ -325,8 +328,8 @@ class MEDIA_EXPORT SourceBufferStream {
// appropriately and returns true. Otherwise returns false.
bool SetPendingBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
- // Callback used to report error strings that can help the web developer
- // figure out what is wrong with the content.
+ // Callback used to report log messages that can help the web developer figure
+ // out what is wrong with the content.
LogCB log_cb_;
// List of disjoint buffered ranges, ordered by start time.
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 0fbd0425a51..e360ba54723 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -50,7 +50,7 @@ class SourceBufferStreamTest : public testing::Test {
void SetTextStream() {
video_config_ = TestVideoConfig::Invalid();
TextTrackConfig config(kTextSubtitles, "", "", "");
- stream_.reset(new SourceBufferStream(config, LogCB(), true));
+ stream_.reset(new SourceBufferStream(config, log_cb(), true));
SetStreamInfo(2, 2);
}
@@ -66,7 +66,7 @@ class SourceBufferStreamTest : public testing::Test {
false,
base::TimeDelta(),
0);
- stream_.reset(new SourceBufferStream(audio_config_, LogCB(), true));
+ stream_.reset(new SourceBufferStream(audio_config_, log_cb(), true));
// Equivalent to 2ms per frame.
SetStreamInfo(500, 500);
@@ -232,7 +232,7 @@ class SourceBufferStreamTest : public testing::Test {
break;
if (expect_keyframe && current_position == starting_position)
- EXPECT_TRUE(buffer->IsKeyframe());
+ EXPECT_TRUE(buffer->is_key_frame());
if (expected_data) {
const uint8* actual_data = buffer->data();
@@ -293,9 +293,20 @@ class SourceBufferStreamTest : public testing::Test {
ss << "|" << buffer->GetDecodeTimestamp().InMilliseconds();
}
+ // Check duration if expected timestamp contains it.
+ if (timestamps[i].find('D') != std::string::npos) {
+ ss << "D" << buffer->duration().InMilliseconds();
+ }
+
+ // Check duration estimation if expected timestamp contains it.
+ if (timestamps[i].find('E') != std::string::npos &&
+ buffer->is_duration_estimated()) {
+ ss << "E";
+ }
+
// Handle preroll buffers.
if (EndsWith(timestamps[i], "P", true)) {
- ASSERT_TRUE(buffer->IsKeyframe());
+ ASSERT_TRUE(buffer->is_key_frame());
scoped_refptr<StreamParserBuffer> preroll_buffer;
preroll_buffer.swap(buffer);
@@ -312,10 +323,10 @@ class SourceBufferStreamTest : public testing::Test {
preroll_buffer->GetDecodeTimestamp());
ASSERT_EQ(kInfiniteDuration(), preroll_buffer->discard_padding().first);
ASSERT_EQ(base::TimeDelta(), preroll_buffer->discard_padding().second);
- ASSERT_TRUE(buffer->IsKeyframe());
+ ASSERT_TRUE(buffer->is_key_frame());
ss << "P";
- } else if (buffer->IsKeyframe()) {
+ } else if (buffer->is_key_frame()) {
ss << "K";
}
@@ -338,6 +349,12 @@ class SourceBufferStreamTest : public testing::Test {
EXPECT_EQ(SourceBufferStream::kNeedBuffer, stream_->GetNextBuffer(&buffer));
}
+ void CheckEOSReached() {
+ scoped_refptr<StreamParserBuffer> buffer;
+ EXPECT_EQ(SourceBufferStream::kEndOfStream,
+ stream_->GetNextBuffer(&buffer));
+ }
+
void CheckVideoConfig(const VideoDecoderConfig& config) {
const VideoDecoderConfig& actual = stream_->GetCurrentVideoDecoderConfig();
EXPECT_TRUE(actual.Matches(config))
@@ -352,10 +369,7 @@ class SourceBufferStreamTest : public testing::Test {
<< "\nActual: " << actual.AsHumanReadableString();
}
- const LogCB log_cb() {
- return base::Bind(&SourceBufferStreamTest::DebugMediaLog,
- base::Unretained(this));
- }
+ const LogCB log_cb() { return base::Bind(&AddLogEntryForTest); }
base::TimeDelta frame_duration() const { return frame_duration_; }
@@ -430,9 +444,12 @@ class SourceBufferStreamTest : public testing::Test {
}
// StringToBufferQueue() allows for the generation of StreamParserBuffers from
- // coded strings of timestamps separated by spaces. Supported syntax:
+ // coded strings of timestamps separated by spaces.
//
- // xx:
+ // Supported syntax (options must be in this order):
+ // pp[|dd][Dzz][E][P][K]
+ //
+ // pp:
// Generates a StreamParserBuffer with decode and presentation timestamp xx.
// E.g., "0 1 2 3".
//
@@ -440,19 +457,26 @@ class SourceBufferStreamTest : public testing::Test {
// Generates a StreamParserBuffer with presentation timestamp pp and decode
// timestamp dd. E.g., "0|0 3|1 1|2 2|3".
//
- // ##Dzz
- // Specifies the duration for a buffer. ## represents one of the 2 timestamp
- // formats above. zz specifies the duration of the buffer in milliseconds.
- // If the duration isn't specified with this syntax then the buffer duration
- // is determined by the difference between the decode timestamp in ## and
- // the decode timestamp of the previous buffer in the string. If the string
- // only contains 1 buffer then the duration must be explicitly specified with
- // this format.
+ // Dzz
+ // Explicitly describe the duration of the buffer. zz specifies the duration
+ // in milliseconds. If the duration isn't specified with this syntax, the
+ // duration is derived using the timestamp delta between this buffer and the
+ // next buffer. If not specified, the final buffer will simply copy the
+ // duration of the previous buffer. If the queue only contains 1 buffer then
+ // the duration must be explicitly specified with this format.
+ // E.g. "0D10 10D20"
+ //
+ // E:
+ // Indicates that the buffer should be marked as containing an *estimated*
+ // duration. E.g., "0D20E 20 25E 30"
//
- // ##K:
- // Indicates the buffer with timestamp ## reflects a keyframe. ##
- // can be any of the 3 timestamp formats above.
- // E.g., "0K 1|2K 2|4D2K".
+ // P:
+ // Indicates the buffer with will also have a preroll buffer
+ // associated with it. The preroll buffer will just be dummy data.
+ // E.g. "0P 5 10"
+ //
+ // K:
+ // Indicates the buffer is a keyframe. E.g., "0K 1|2K 2|4D2K 6 8".
//
// S(a# ... y# z#)
// Indicates a splice frame buffer should be created with timestamp z#. The
@@ -474,6 +498,8 @@ class SourceBufferStreamTest : public testing::Test {
bool is_keyframe = false;
bool has_preroll = false;
bool last_splice_frame = false;
+ bool is_duration_estimated = false;
+
// Handle splice frame starts.
if (StartsWithASCII(timestamps[i], "S(", true)) {
CHECK(!splice_frame);
@@ -508,6 +534,12 @@ class SourceBufferStreamTest : public testing::Test {
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
+ if (EndsWith(timestamps[i], "E", true)) {
+ is_duration_estimated = true;
+ // Remove the "E" off of the token.
+ timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
+ }
+
int duration_in_ms = 0;
size_t duration_pos = timestamps[i].find('D');
if (duration_pos != std::string::npos) {
@@ -534,6 +566,7 @@ class SourceBufferStreamTest : public testing::Test {
StreamParserBuffer::CopyFrom(&kDataA, kDataSize, is_keyframe,
DemuxerStream::AUDIO, 0);
buffer->set_timestamp(base::TimeDelta::FromMilliseconds(pts_in_ms));
+ buffer->set_is_duration_estimated(is_duration_estimated);
if (dts_in_ms != pts_in_ms) {
buffer->SetDecodeTimestamp(
@@ -628,10 +661,6 @@ class SourceBufferStreamTest : public testing::Test {
}
}
- void DebugMediaLog(const std::string& log) {
- DVLOG(1) << log;
- }
-
int frames_per_second_;
int keyframes_per_second_;
base::TimeDelta frame_duration_;
@@ -2301,7 +2330,7 @@ TEST_F(SourceBufferStreamTest, PresentationTimestampIndependence) {
scoped_refptr<StreamParserBuffer> buffer;
ASSERT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kSuccess);
- if (buffer->IsKeyframe()) {
+ if (buffer->is_key_frame()) {
EXPECT_EQ(DecodeTimestamp::FromPresentationTime(buffer->timestamp()),
buffer->GetDecodeTimestamp());
last_keyframe_idx = i;
@@ -3216,6 +3245,55 @@ TEST_F(SourceBufferStreamTest,
CheckExpectedRangesByTimestamp("{ [0,90) }");
}
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_MarkEOS) {
+ // Append 1 buffer at positions 0 through 8.
+ NewSegmentAppend(0, 9);
+
+ // Check expected ranges.
+ CheckExpectedRanges("{ [0,8) }");
+
+ // Seek to 5.
+ Seek(5);
+
+ // Set duration to be before the seeked to position.
+ // This will result in truncation of the selected range and a reset
+ // of NextBufferPosition.
+ stream_->OnSetDuration(frame_duration() * 4);
+
+ // Check the expected ranges.
+ CheckExpectedRanges("{ [0,3) }");
+
+ // Mark EOS reached.
+ stream_->MarkEndOfStream();
+
+ // Expect EOS to be reached.
+ CheckEOSReached();
+}
+
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_MarkEOS_IsSeekPending) {
+ // Append 1 buffer at positions 0 through 8.
+ NewSegmentAppend(0, 9);
+
+ // Check expected ranges.
+ CheckExpectedRanges("{ [0,8) }");
+
+ // Seek to 9 which will result in a pending seek.
+ Seek(9);
+
+ // Set duration to be before the seeked to position.
+ // This will result in truncation of the selected range and a reset
+ // of NextBufferPosition.
+ stream_->OnSetDuration(frame_duration() * 4);
+
+ // Check the expected ranges.
+ CheckExpectedRanges("{ [0,3) }");
+
+ EXPECT_TRUE(stream_->IsSeekPending());
+ // Mark EOS reached.
+ stream_->MarkEndOfStream();
+ EXPECT_FALSE(stream_->IsSeekPending());
+}
+
// Test the case were the current playback position is at the end of the
// buffered data and several overlaps occur that causes the selected
// range to get split and then merged back into a single range.
@@ -3916,6 +3994,33 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoTinySplices) {
CheckNoNextBuffer();
}
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
+ video_config_ = TestVideoConfig::Invalid();
+ audio_config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_STEREO, 4000, NULL, 0, false, false,
+ base::TimeDelta(), 0);
+ stream_.reset(new SourceBufferStream(audio_config_, log_cb(), true));
+ // Equivalent to 0.5ms per frame.
+ SetStreamInfo(2000, 2000);
+ Seek(0);
+
+ // Append four buffers with a 0.5ms duration each.
+ NewSegmentAppend(0, 4);
+ CheckExpectedRangesByTimestamp("{ [0,2) }");
+
+ // Overlap the range [0, 2) with [1.25, 2); this results in an overlap of
+ // 0.75ms between the ranges.
+ NewSegmentAppend_OffsetFirstBuffer(2, 2,
+ base::TimeDelta::FromMillisecondsD(0.25));
+ CheckExpectedRangesByTimestamp("{ [0,2) }");
+
+ // A splice frame should not be generated (indicated by the lack of a config
+ // change in the expected buffer string) since it requires at least 1ms of
+ // data to crossfade.
+ CheckExpectedBuffers("0K 0K 1K 1K 1K");
+ CheckNoNextBuffer();
+}
+
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Preroll) {
SetAudioStream();
Seek(0);
@@ -3941,6 +4046,83 @@ TEST_F(SourceBufferStreamTest, BFrames) {
CheckNoNextBuffer();
}
+TEST_F(SourceBufferStreamTest, RemoveShouldAlwaysExcludeEnd) {
+ NewSegmentAppend("10D2K 12D2 14D2");
+ CheckExpectedRangesByTimestamp("{ [10,16) }");
+
+ // Start new segment, appending KF to abut the start of previous segment.
+ NewSegmentAppend("0D10K");
+ Seek(0);
+ CheckExpectedRangesByTimestamp("{ [0,16) }");
+ CheckExpectedBuffers("0K 10K 12 14");
+ CheckNoNextBuffer();
+
+ // Append another buffer with the same timestamp as the last KF. This triggers
+ // special logic that allows two buffers to have the same timestamp. When
+ // preparing for this new append, there is no reason to remove the later GOP
+ // starting at timestamp 10. This verifies the fix for http://crbug.com/469325
+ // where the decision *not* to remove the start of the overlapped range was
+ // erroneously triggering buffers with a timestamp matching the end
+ // of the append (and any later dependent frames) to be removed.
+ AppendBuffers("0D10");
+ Seek(0);
+ CheckExpectedRangesByTimestamp("{ [0,16) }");
+ CheckExpectedBuffers("0K 0 10K 12 14");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, RefinedDurationEstimates_BackOverlap) {
+ // Append a few buffers, the last one having estimated duration.
+ NewSegmentAppend("0K 5 10 20D10E");
+ CheckExpectedRangesByTimestamp("{ [0,30) }");
+ Seek(0);
+ CheckExpectedBuffers("0K 5 10 20D10E");
+ CheckNoNextBuffer();
+
+ // Append a buffer to the end that overlaps the *back* of the existing range.
+ // This should trigger the estimated duration to be recomputed as a timestamp
+ // delta.
+ AppendBuffers("25D10");
+ CheckExpectedRangesByTimestamp("{ [0,35) }");
+ Seek(0);
+ // The duration of the buffer at time 20 has changed from 10ms to 5ms.
+ CheckExpectedBuffers("0K 5 10 20D5E 25");
+ CheckNoNextBuffer();
+
+ // If the last buffer is removed, the adjusted duration should remain at 5ms.
+ RemoveInMs(25, 35, 35);
+ CheckExpectedRangesByTimestamp("{ [0,25) }");
+ Seek(0);
+ CheckExpectedBuffers("0K 5 10 20D5E");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, RefinedDurationEstimates_FrontOverlap) {
+ // Append a few buffers.
+ NewSegmentAppend("10K 15 20D5");
+ CheckExpectedRangesByTimestamp("{ [10,25) }");
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(10));
+ CheckExpectedBuffers("10K 15 20");
+ CheckNoNextBuffer();
+
+ // Append new buffers, where the last has estimated duration that overlaps the
+ // *front* of the existing range. The overlap should trigger refinement of the
+ // estimated duration from 7ms to 5ms.
+ NewSegmentAppend("0K 5D7E");
+ CheckExpectedRangesByTimestamp("{ [0,25) }");
+ Seek(0);
+ CheckExpectedBuffers("0K 5D5E 10K 15 20");
+ CheckNoNextBuffer();
+
+ // If the overlapped buffer at timestamp 10 is removed, the adjusted duration
+ // should remain adjusted.
+ RemoveInMs(10, 20, 25);
+ CheckExpectedRangesByTimestamp("{ [0,10) }");
+ Seek(0);
+ CheckExpectedBuffers("0K 5D5E");
+ CheckNoNextBuffer();
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index b47fa753733..1315bd27b2c 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -8,7 +8,6 @@
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
-#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/formats/mpeg/adts_stream_parser.h"
#include "media/formats/mpeg/mpeg1_audio_stream_parser.h"
@@ -102,31 +101,42 @@ static StreamParser* BuildWebMParser(
// AAC Object Type IDs that Chrome supports.
static const int kAACLCObjectType = 2;
static const int kAACSBRObjectType = 5;
+static const int kAACPSObjectType = 29;
static int GetMP4AudioObjectType(const std::string& codec_id,
const LogCB& log_cb) {
+ // From RFC 6381 section 3.3 (ISO Base Media File Format Name Space):
+ // When the first element of a ['codecs' parameter value] is 'mp4a' ...,
+ // the second element is a hexadecimal representation of the MP4 Registration
+ // Authority ObjectTypeIndication (OTI). Note that MP4RA uses a leading "0x"
+ // with these values, which is omitted here and hence implied.
std::vector<std::string> tokens;
if (Tokenize(codec_id, ".", &tokens) == 3 &&
tokens[0] == "mp4a" && tokens[1] == "40") {
+ // From RFC 6381 section 3.3:
+ // One of the OTI values for 'mp4a' is 40 (identifying MPEG-4 audio). For
+ // this value, the third element identifies the audio ObjectTypeIndication
+ // (OTI) ... expressed as a decimal number.
int audio_object_type;
- if (base::HexStringToInt(tokens[2], &audio_object_type))
+ if (base::StringToInt(tokens[2], &audio_object_type))
return audio_object_type;
}
- MEDIA_LOG(log_cb) << "Malformed mimetype codec '" << codec_id << "'";
+ MEDIA_LOG(DEBUG, log_cb) << "Malformed mimetype codec '" << codec_id << "'";
return -1;
}
bool ValidateMP4ACodecID(const std::string& codec_id, const LogCB& log_cb) {
int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
if (audio_object_type == kAACLCObjectType ||
- audio_object_type == kAACSBRObjectType) {
+ audio_object_type == kAACSBRObjectType ||
+ audio_object_type == kAACPSObjectType) {
return true;
}
- MEDIA_LOG(log_cb) << "Unsupported audio object type "
- << "0x" << std::hex << audio_object_type
- << " in codec '" << codec_id << "'";
+ MEDIA_LOG(DEBUG, log_cb) << "Unsupported audio object type "
+ << audio_object_type << " in codec '" << codec_id
+ << "'";
return false;
}
@@ -170,7 +180,8 @@ static StreamParser* BuildMP4Parser(
audio_object_types.insert(mp4::kISO_14496_3);
- if (audio_object_type == kAACSBRObjectType) {
+ if (audio_object_type == kAACSBRObjectType ||
+ audio_object_type == kAACPSObjectType) {
has_sbr = true;
break;
}
@@ -219,9 +230,12 @@ static StreamParser* BuildMP2TParser(
bool has_sbr = false;
for (size_t i = 0; i < codecs.size(); ++i) {
std::string codec_id = codecs[i];
- if (MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern) &&
- GetMP4AudioObjectType(codec_id, log_cb) == kAACSBRObjectType) {
- has_sbr = true;
+ if (MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern)) {
+ int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
+ if (audio_object_type == kAACSBRObjectType ||
+ audio_object_type == kAACPSObjectType) {
+ has_sbr = true;
+ }
}
}
@@ -260,13 +274,6 @@ static bool VerifyCodec(
std::vector<CodecInfo::HistogramTag>* video_codecs) {
switch (codec_info->type) {
case CodecInfo::AUDIO:
-#if defined(ENABLE_EAC3_PLAYBACK)
- if (codec_info->tag == CodecInfo::HISTOGRAM_EAC3) {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kEnableEac3Playback))
- return false;
- }
-#endif
if (audio_codecs)
audio_codecs->push_back(codec_info->tag);
return true;
@@ -277,6 +284,11 @@ static bool VerifyCodec(
base::android::BuildInfo::GetInstance()->sdk_int() < 19) {
return false;
}
+ // Opus is only supported on Lollipop+ (API Level 21).
+ if (codec_info->tag == CodecInfo::HISTOGRAM_OPUS &&
+ base::android::BuildInfo::GetInstance()->sdk_int() < 21) {
+ return false;
+ }
#endif
if (video_codecs)
video_codecs->push_back(codec_info->tag);
@@ -325,8 +337,8 @@ static bool CheckTypeAndCodecs(
return true;
}
- MEDIA_LOG(log_cb) << "A codecs parameter must be provided for '"
- << type << "'";
+ MEDIA_LOG(DEBUG, log_cb) << "A codecs parameter must be provided for '"
+ << type << "'";
return false;
}
@@ -347,8 +359,8 @@ static bool CheckTypeAndCodecs(
}
if (!found_codec) {
- MEDIA_LOG(log_cb) << "Codec '" << codec_id
- << "' is not supported for '" << type << "'";
+ MEDIA_LOG(DEBUG, log_cb) << "Codec '" << codec_id
+ << "' is not supported for '" << type << "'";
return false;
}
}
diff --git a/chromium/media/filters/test_video_frame_scheduler.cc b/chromium/media/filters/test_video_frame_scheduler.cc
deleted file mode 100644
index 9dba38d0759..00000000000
--- a/chromium/media/filters/test_video_frame_scheduler.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/test_video_frame_scheduler.h"
-
-#include "media/base/video_frame.h"
-
-namespace media {
-
-TestVideoFrameScheduler::ScheduledFrame::ScheduledFrame(
- const scoped_refptr<VideoFrame> frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb)
- : frame(frame), wall_ticks(wall_ticks), done_cb(done_cb) {
-}
-
-TestVideoFrameScheduler::ScheduledFrame::~ScheduledFrame() {
-}
-
-TestVideoFrameScheduler::TestVideoFrameScheduler() {
-}
-
-TestVideoFrameScheduler::~TestVideoFrameScheduler() {
-}
-
-void TestVideoFrameScheduler::ScheduleVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) {
- scheduled_frames_.push_back(ScheduledFrame(frame, wall_ticks, done_cb));
-}
-
-void TestVideoFrameScheduler::Reset() {
- scheduled_frames_.clear();
-}
-
-void TestVideoFrameScheduler::DisplayFramesUpTo(base::TimeTicks wall_ticks) {
- RunDoneCBForFramesUpTo(wall_ticks, DISPLAYED);
-}
-
-void TestVideoFrameScheduler::DropFramesUpTo(base::TimeTicks wall_ticks) {
- RunDoneCBForFramesUpTo(wall_ticks, DROPPED);
-}
-
-void TestVideoFrameScheduler::RunDoneCBForFramesUpTo(base::TimeTicks wall_ticks,
- Reason reason) {
- std::vector<ScheduledFrame> done_frames;
- std::vector<ScheduledFrame> remaining_frames;
-
- for (size_t i = 0; i < scheduled_frames_.size(); ++i) {
- if (scheduled_frames_[i].wall_ticks <= wall_ticks) {
- done_frames.push_back(scheduled_frames_[i]);
- } else {
- remaining_frames.push_back(scheduled_frames_[i]);
- }
- }
-
- scheduled_frames_.swap(remaining_frames);
-
- for (size_t i = 0; i < done_frames.size(); ++i) {
- done_frames[i].done_cb.Run(done_frames[i].frame, reason);
- }
-}
-
-} // namespace media
diff --git a/chromium/media/filters/test_video_frame_scheduler.h b/chromium/media/filters/test_video_frame_scheduler.h
deleted file mode 100644
index 9ed082e69ec..00000000000
--- a/chromium/media/filters/test_video_frame_scheduler.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
-#define MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
-
-#include <vector>
-
-#include "media/filters/video_frame_scheduler.h"
-
-namespace media {
-
-// A scheduler that queues frames until told otherwise.
-class TestVideoFrameScheduler : public VideoFrameScheduler {
- public:
- struct ScheduledFrame {
- ScheduledFrame(const scoped_refptr<VideoFrame> frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb);
- ~ScheduledFrame();
-
- scoped_refptr<VideoFrame> frame;
- base::TimeTicks wall_ticks;
- DoneCB done_cb;
- };
-
- TestVideoFrameScheduler();
- ~TestVideoFrameScheduler() override;
-
- // VideoFrameScheduler implementation.
- void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) override;
- void Reset() override;
-
- // Displays all frames with scheduled times <= |wall_ticks|.
- void DisplayFramesUpTo(base::TimeTicks wall_ticks);
-
- // Drops all frames with scheduled times <= |wall_ticks|.
- void DropFramesUpTo(base::TimeTicks wall_ticks);
-
- const std::vector<ScheduledFrame>& scheduled_frames() const {
- return scheduled_frames_;
- }
-
- private:
- void RunDoneCBForFramesUpTo(base::TimeTicks wall_ticks, Reason reason);
-
- std::vector<ScheduledFrame> scheduled_frames_;
-
- DISALLOW_COPY_AND_ASSIGN(TestVideoFrameScheduler);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
diff --git a/chromium/media/filters/video_cadence_estimator.cc b/chromium/media/filters/video_cadence_estimator.cc
new file mode 100644
index 00000000000..bad4fd056b7
--- /dev/null
+++ b/chromium/media/filters/video_cadence_estimator.cc
@@ -0,0 +1,261 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/video_cadence_estimator.h"
+
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <string>
+
+#include "base/metrics/histogram_macros.h"
+
+namespace media {
+
+// To prevent oscillation in and out of cadence or between cadence values, we
+// require some time to elapse before a cadence switch is accepted.
+const int kMinimumCadenceDurationMs = 100;
+
+// Records the number of cadence changes to UMA.
+static void HistogramCadenceChangeCount(int cadence_changes) {
+ const int kCadenceChangeMax = 10;
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Media.VideoRenderer.CadenceChanges",
+ cadence_changes, 0, kCadenceChangeMax,
+ kCadenceChangeMax);
+}
+
+VideoCadenceEstimator::VideoCadenceEstimator(
+ base::TimeDelta minimum_time_until_max_drift)
+ : cadence_hysteresis_threshold_(
+ base::TimeDelta::FromMilliseconds(kMinimumCadenceDurationMs)),
+ minimum_time_until_max_drift_(minimum_time_until_max_drift) {
+ Reset();
+}
+
+VideoCadenceEstimator::~VideoCadenceEstimator() {
+}
+
+void VideoCadenceEstimator::Reset() {
+ cadence_.clear();
+ pending_cadence_.clear();
+ cadence_changes_ = render_intervals_cadence_held_ = 0;
+ first_update_call_ = true;
+}
+
+bool VideoCadenceEstimator::UpdateCadenceEstimate(
+ base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift) {
+ DCHECK_GT(render_interval, base::TimeDelta());
+ DCHECK_GT(frame_duration, base::TimeDelta());
+
+ base::TimeDelta time_until_max_drift;
+
+ // See if we can find a cadence which fits the data.
+ Cadence new_cadence =
+ CalculateCadence(render_interval, frame_duration, max_acceptable_drift,
+ &time_until_max_drift);
+
+ // If this is the first time UpdateCadenceEstimate() has been called,
+ // initialize the histogram with a zero count for cadence changes; this
+ // allows us to track the number of playbacks which have cadence at all.
+ if (first_update_call_) {
+ DCHECK_EQ(cadence_changes_, 0);
+ first_update_call_ = false;
+ HistogramCadenceChangeCount(0);
+ }
+
+ // If nothing changed, do nothing.
+ if (new_cadence == cadence_) {
+ // Clear cadence hold to pending values from accumulating incorrectly.
+ render_intervals_cadence_held_ = 0;
+ return false;
+ }
+
+ // Wait until enough render intervals have elapsed before accepting the
+ // cadence change. Prevents oscillation of the cadence selection.
+ bool update_pending_cadence = true;
+ if (new_cadence == pending_cadence_ ||
+ cadence_hysteresis_threshold_ <= render_interval) {
+ if (++render_intervals_cadence_held_ * render_interval >=
+ cadence_hysteresis_threshold_) {
+ DVLOG(1) << "Cadence switch: " << CadenceToString(cadence_) << " -> "
+ << CadenceToString(new_cadence)
+ << " :: Time until drift exceeded: " << time_until_max_drift;
+ cadence_.swap(new_cadence);
+
+ // Note: Because this class is transitively owned by a garbage collected
+ // object, WebMediaPlayer, we log cadence changes as they are encountered.
+ HistogramCadenceChangeCount(++cadence_changes_);
+ return true;
+ }
+
+ update_pending_cadence = false;
+ }
+
+ DVLOG(2) << "Hysteresis prevented cadence switch: "
+ << CadenceToString(cadence_) << " -> "
+ << CadenceToString(new_cadence);
+
+ if (update_pending_cadence) {
+ pending_cadence_.swap(new_cadence);
+ render_intervals_cadence_held_ = 1;
+ }
+
+ return false;
+}
+
+int VideoCadenceEstimator::GetCadenceForFrame(uint64_t frame_number) const {
+ DCHECK(has_cadence());
+ return cadence_[frame_number % cadence_.size()];
+}
+
+VideoCadenceEstimator::Cadence VideoCadenceEstimator::CalculateCadence(
+ base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ base::TimeDelta* time_until_max_drift) const {
+ // See if we can find a cadence which fits the data.
+ Cadence result;
+ if (CalculateOneFrameCadence(render_interval, frame_duration,
+ max_acceptable_drift, &result,
+ time_until_max_drift)) {
+ DCHECK_EQ(1u, result.size());
+ } else if (CalculateFractionalCadence(render_interval, frame_duration,
+ max_acceptable_drift, &result,
+ time_until_max_drift)) {
+ DCHECK(!result.empty());
+ } else if (CalculateOneFrameCadence(render_interval, frame_duration * 2,
+ max_acceptable_drift, &result,
+ time_until_max_drift)) {
+ // By finding cadence for double the frame duration, we're saying there
+ // exist two integers a and b, where a > b and a + b = |result|; this
+ // matches all patterns which regularly have half a frame per render
+ // interval; i.e. 24fps in 60hz.
+ DCHECK_EQ(1u, result.size());
+
+ // While we may find a two pattern cadence, sometimes one extra frame
+ // duration is enough to allow a match for 1-frame cadence if the
+ // |time_until_max_drift| was on the edge.
+ //
+ // All 2-frame cadence values should be odd, so we can detect this and fall
+ // back to 1-frame cadence when this occurs.
+ if (result[0] & 1) {
+ result[0] = std::ceil(result[0] / 2.0);
+ result.push_back(result[0] - 1);
+ } else {
+ result[0] /= 2;
+ }
+ }
+ return result;
+}
+
+bool VideoCadenceEstimator::CalculateOneFrameCadence(
+ base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ Cadence* cadence,
+ base::TimeDelta* time_until_max_drift) const {
+ DCHECK(cadence->empty());
+
+ // The perfect cadence is the number of render intervals per frame, while the
+ // clamped cadence is the nearest matching integer value.
+ //
+ // As mentioned in the introduction, |perfect_cadence| is the ratio of the
+ // frame duration to render interval length; while |clamped_cadence| is the
+ // nearest integer value to |perfect_cadence|.
+ const double perfect_cadence =
+ frame_duration.InSecondsF() / render_interval.InSecondsF();
+ const int clamped_cadence = perfect_cadence + 0.5;
+ if (!clamped_cadence)
+ return false;
+
+ // For cadence based rendering the actual frame duration is just the frame
+ // duration, while the |rendered_frame_duration| is how long the frame would
+ // be displayed for if we rendered it |clamped_cadence| times.
+ const base::TimeDelta rendered_frame_duration =
+ clamped_cadence * render_interval;
+ if (!IsAcceptableCadence(rendered_frame_duration, frame_duration,
+ max_acceptable_drift, time_until_max_drift)) {
+ return false;
+ }
+
+ cadence->push_back(clamped_cadence);
+ return true;
+}
+
+bool VideoCadenceEstimator::CalculateFractionalCadence(
+ base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ Cadence* cadence,
+ base::TimeDelta* time_until_max_drift) const {
+ DCHECK(cadence->empty());
+
+ // Fractional cadence allows us to see if we have a cadence which would look
+ // best if we consistently drop the same frames.
+ //
+ // In this case, the perfect cadence is the number of frames per render
+ // interval, while the clamped cadence is the nearest integer value.
+ const double perfect_cadence =
+ render_interval.InSecondsF() / frame_duration.InSecondsF();
+ const int clamped_cadence = perfect_cadence + 0.5;
+ if (!clamped_cadence)
+ return false;
+
+ // For fractional cadence, the rendered duration of each frame is just the
+ // render interval. While the actual frame duration is the total duration of
+ // all the frames we would end up dropping during that time.
+ const base::TimeDelta actual_frame_duration =
+ clamped_cadence * frame_duration;
+ if (!IsAcceptableCadence(render_interval, actual_frame_duration,
+ max_acceptable_drift, time_until_max_drift)) {
+ return false;
+ }
+
+ // Fractional cadence means we render the first of |clamped_cadence| frames
+ // and drop |clamped_cadence| - 1 frames.
+ cadence->insert(cadence->begin(), clamped_cadence, 0);
+ (*cadence)[0] = 1;
+ return true;
+}
+
+std::string VideoCadenceEstimator::CadenceToString(
+ const Cadence& cadence) const {
+ if (cadence.empty())
+ return std::string("[]");
+
+ std::ostringstream os;
+ os << "[";
+ std::copy(cadence.begin(), cadence.end() - 1,
+ std::ostream_iterator<int>(os, ":"));
+ os << cadence.back() << "]";
+ return os.str();
+}
+
+bool VideoCadenceEstimator::IsAcceptableCadence(
+ base::TimeDelta rendered_frame_duration,
+ base::TimeDelta actual_frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ base::TimeDelta* time_until_max_drift) const {
+ if (rendered_frame_duration == actual_frame_duration)
+ return true;
+
+ // Compute how long it'll take to exhaust the drift if frames are rendered for
+ // |rendered_frame_duration| instead of |actual_frame_duration|.
+ const double duration_delta =
+ (rendered_frame_duration - actual_frame_duration)
+ .magnitude()
+ .InMicroseconds();
+ const int64 frames_until_drift_exhausted =
+ std::ceil(max_acceptable_drift.InMicroseconds() / duration_delta);
+
+ // If the time until a frame would be repeated or dropped is greater than our
+ // limit of acceptability, the cadence is acceptable.
+ *time_until_max_drift =
+ rendered_frame_duration * frames_until_drift_exhausted;
+ return *time_until_max_drift >= minimum_time_until_max_drift_;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_cadence_estimator.h b/chromium/media/filters/video_cadence_estimator.h
new file mode 100644
index 00000000000..89b2436c0df
--- /dev/null
+++ b/chromium/media/filters/video_cadence_estimator.h
@@ -0,0 +1,199 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_
+#define MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_
+
+#include <vector>
+
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Estimates whether a given frame duration and render interval length have a
+// render cadence which would allow for optimal uniformity of displayed frame
+// durations over time.
+//
+// Cadence is the ideal repeating frame pattern for a group of frames; currently
+// VideoCadenceEstimator supports 1-frame ([N]), 2-frame ([3:2]), and N-frame
+// fractional ([1:0:...:0]) cadences. Details on what this means are below.
+//
+// The perfect cadence of a set of frames is the ratio of the frame duration to
+// render interval length. I.e. for 30fps in 60Hz the cadence would be (1/30) /
+// (1/60) = 60 / 30 = 2. It's common that this is not an exact integer, e.g.,
+// 29.974fps in 60Hz which would have a cadence of (1/29.974) / (1/60) =
+// ~2.0029.
+//
+// The perfect cadence is always a real number. All N-cadences [a1:a2:..:aN]
+// where aK is an integer are an approximation of the perfect cadence; i.e. the
+// average of [a1:..:aN] will approximately equal the perfect cadence. When N=1
+// we have a 1-frame cadence, when N=2, we have a 2-frame cadence, etc.
+//
+// For single frame cadence we just round the perfect cadence (~2.0029 in the
+// previous example) to the nearest integer value (2 in this case; which is
+// denoted as a cadence of [2]). If the delta between those values is small we
+// can choose to render frames for the integer number of render intervals;
+// shortening or lengthening the actual rendered frame duration. Doing so
+// ensures each frame gets an optimal amount of display time.
+//
+// The delta between the perfect cadence and the rounded cadence leads to drift
+// over time of the actual VideoFrame timestamp relative to its rendered time,
+// so we perform some calculations to ensure we only use a cadence when it will
+// take some time to drift an undesirable amount; see CalculateCadence() for
+// details on how this calculation is made.
+//
+// 2-frame cadence is an extension of 1-frame cadence. Consider the case of
+// 24fps in 60Hz, which has a perfect cadence of 2.5; rounding up to a cadence
+// of 3 would cause drift to accumulate unusably fast. A better approximation
+// of this cadence would be [3:2].
+//
+// Fractional cadence is a special case of N-frame cadence which can be used
+// when the frame duration is shorter than the render interval; e.g. 120fps in
+// 60Hz. In this case, the first frame in each group of N frames is displayed
+// once, while the next N - 1 frames are dropped; i.e. the cadence is of the
+// form [1:0:..:0]. Using the previous example N = 120/60 = 2, which means the
+// cadence would be [1:0]. See CalculateFractionalCadence() for more details.
+//
+// In practice this works out to the following for common setups if we use
+// cadence based selection:
+//
+// 29.5fps in 60Hz, ~17ms max drift => exhausted in ~1 second.
+// 29.9fps in 60Hz, ~17ms max drift => exhausted in ~16.4 seconds.
+// 24fps in 60Hz, ~21ms max drift => exhausted in ~0.15 seconds.
+// 25fps in 60Hz, 20ms max drift => exhausted in ~4.0 seconds.
+// 59.9fps in 60Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
+// 24.9fps in 50Hz, ~20ms max drift => exhausted in ~20.5 seconds.
+// 120fps in 59.9Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
+//
+class MEDIA_EXPORT VideoCadenceEstimator {
+ public:
+ // As mentioned in the introduction, the determination of whether to clamp to
+ // a given cadence is based on how long it takes before a frame would have to
+ // be dropped or repeated to compensate for reaching the maximum acceptable
+ // drift; this time length is controlled by |minimum_time_until_max_drift|.
+ explicit VideoCadenceEstimator(base::TimeDelta minimum_time_until_max_drift);
+ ~VideoCadenceEstimator();
+
+ // Clears stored cadence information.
+ void Reset();
+
+ // Updates the estimates for |cadence_| based on the given values as described
+ // in the introduction above.
+ //
+ // Clients should call this and then update the cadence for all frames via the
+ // GetCadenceForFrame() method if the cadence changes.
+ //
+  // Cadence changes will not take effect until enough render intervals have
+ // elapsed. For the purposes of hysteresis, each UpdateCadenceEstimate() call
+ // is assumed to elapse one |render_interval| worth of time.
+ //
+ // Returns true if the cadence has changed since the last call.
+ bool UpdateCadenceEstimate(base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift);
+
+ // Returns true if a useful cadence was found.
+ bool has_cadence() const { return !cadence_.empty(); }
+
+ // Given a |frame_number|, where zero is the most recently rendered frame,
+ // returns the ideal cadence for that frame.
+ //
+ // Note: Callers must track the base |frame_number| relative to all frames
+ // rendered or removed after the first frame for which cadence is detected.
+ // The first frame after cadence is detected has a |frame_number| of 0.
+ //
+ // Frames which come in before the last rendered frame should be ignored in
+ // terms of impact to the base |frame_number|.
+ int GetCadenceForFrame(uint64_t frame_number) const;
+
+ void set_cadence_hysteresis_threshold_for_testing(base::TimeDelta threshold) {
+ cadence_hysteresis_threshold_ = threshold;
+ }
+
+ size_t cadence_size_for_testing() const { return cadence_.size(); }
+ std::string GetCadenceForTesting() const { return CadenceToString(cadence_); }
+
+ private:
+ using Cadence = std::vector<int>;
+
+ // Attempts to find a 1-frame, 2-frame, or N-frame fractional cadence; returns
+ // the cadence vector if cadence is found and sets |time_until_max_drift| for
+ // the computed cadence.
+ Cadence CalculateCadence(base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ base::TimeDelta* time_until_max_drift) const;
+
+ // Calculates the clamped cadence for the given |render_interval| and
+ // |frame_duration|, then calculates how long that cadence can be used before
+ // exhausting |max_acceptable_drift|. If the time until exhaustion is greater
+ // than |minimum_time_until_max_drift_|, returns true and sets |cadence| to
+ // the clamped cadence. If the clamped cadence is unusable, |cadence| will be
+ // set to zero.
+ //
+ // Sets |time_until_max_drift| to the computed glitch time. Set to zero if
+ // the clamped cadence is unusable.
+ bool CalculateOneFrameCadence(base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ Cadence* cadence,
+ base::TimeDelta* time_until_max_drift) const;
+
+ // Similar to CalculateCadence() except it tries to find the ideal number of
+ // frames which can fit into a |render_interval|; which means doing the same
+ // calculations as CalculateCadence() but with the ratio of |render_interval|
+ // to |frame_duration| instead of the other way around.
+ bool CalculateFractionalCadence(base::TimeDelta render_interval,
+ base::TimeDelta frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ Cadence* cadence,
+ base::TimeDelta* time_until_max_drift) const;
+
+ // Converts a cadence vector into a human readable string of the form
+  // "[a:b:...:z]".
+ std::string CadenceToString(const Cadence& cadence) const;
+
+ // Returns true if the drift of the rendered frame duration versus its actual
+  // frame duration takes longer than |minimum_time_until_max_drift_| to exhaust
+ // |max_acceptable_drift|. |time_until_max_drift| is set to how long it will
+ // take before a glitch (frame drop or repeat occurs).
+ bool IsAcceptableCadence(base::TimeDelta rendered_frame_duration,
+ base::TimeDelta actual_frame_duration,
+ base::TimeDelta max_acceptable_drift,
+ base::TimeDelta* time_until_max_drift) const;
+
+ // The approximate best N-frame cadence for all frames seen thus far; updated
+ // by UpdateCadenceEstimate(). Empty when no cadence has been detected.
+ Cadence cadence_;
+
+ // Used as hysteresis to prevent oscillation between cadence approximations
+ // for spurious blips in the render interval or frame duration.
+ //
+ // Once a new cadence is detected, |render_intervals_cadence_held_| is
+ // incremented for each UpdateCadenceEstimate() call where |cadence_| matches
+ // |pending_cadence_|. |render_intervals_cadence_held_| is cleared when a
+ // "new" cadence matches |cadence_| or |pending_cadence_|.
+ //
+ // Once |kMinimumCadenceDurationMs| is exceeded in render intervals, the
+ // detected cadence is set in |cadence_|.
+ Cadence pending_cadence_;
+ int render_intervals_cadence_held_;
+ base::TimeDelta cadence_hysteresis_threshold_;
+
+ // Tracks how many times cadence has switched during a given playback, used to
+ // histogram the number of cadence changes in a playback.
+ bool first_update_call_;
+ int cadence_changes_;
+
+ // The minimum amount of time allowed before a glitch occurs before confirming
+ // cadence for a given render interval and frame duration.
+ const base::TimeDelta minimum_time_until_max_drift_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCadenceEstimator);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_
diff --git a/chromium/media/filters/video_cadence_estimator_unittest.cc b/chromium/media/filters/video_cadence_estimator_unittest.cc
new file mode 100644
index 00000000000..d96700436c6
--- /dev/null
+++ b/chromium/media/filters/video_cadence_estimator_unittest.cc
@@ -0,0 +1,204 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "media/filters/video_cadence_estimator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// See VideoCadenceEstimator header for more details.
+const int kMinimumAcceptableTimeBetweenGlitchesSecs = 8;
+
+// Slows down the given |fps| according to NTSC field reduction standards; see
+// http://en.wikipedia.org/wiki/Frame_rate#Digital_video_and_television
+static double NTSC(double fps) {
+ return fps / 1.001;
+}
+
+static base::TimeDelta Interval(double hertz) {
+ return base::TimeDelta::FromSecondsD(1.0 / hertz);
+}
+
+std::vector<int> CreateCadenceFromString(const std::string& cadence) {
+ std::vector<std::string> tokens;
+ CHECK_EQ('[', cadence[0]);
+ CHECK_EQ(']', cadence[cadence.length() - 1]);
+ base::SplitString(cadence.substr(1, cadence.length() - 2), ':', &tokens);
+
+ std::vector<int> result;
+ for (const auto& token : tokens) {
+ int cadence_value = 0;
+ CHECK(base::StringToInt(token, &cadence_value)) << token;
+ result.push_back(cadence_value);
+ }
+
+ return result;
+}
+
+static void VerifyCadenceVector(VideoCadenceEstimator* estimator,
+ double frame_hertz,
+ double render_hertz,
+ const std::string& expected_cadence) {
+ SCOPED_TRACE(base::StringPrintf("Checking %.03f fps into %0.03f", frame_hertz,
+ render_hertz));
+
+ const std::vector<int> expected_cadence_vector =
+ CreateCadenceFromString(expected_cadence);
+
+ estimator->Reset();
+ const base::TimeDelta acceptable_drift = Interval(frame_hertz) / 2;
+ const bool cadence_changed = estimator->UpdateCadenceEstimate(
+ Interval(render_hertz), Interval(frame_hertz), acceptable_drift);
+ EXPECT_EQ(cadence_changed, estimator->has_cadence());
+ EXPECT_EQ(expected_cadence_vector.empty(), !estimator->has_cadence());
+
+ // Nothing further to test.
+ if (expected_cadence_vector.empty())
+ return;
+
+  // Spot check two cycles of the cadence.
+ for (size_t i = 0; i < expected_cadence_vector.size() * 2; ++i) {
+ ASSERT_EQ(expected_cadence_vector[i % expected_cadence_vector.size()],
+ estimator->GetCadenceForFrame(i));
+ }
+}
+
+// Spot check common display and frame rate pairs for correctness.
+TEST(VideoCadenceEstimatorTest, CadenceCalculations) {
+ VideoCadenceEstimator estimator(
+ base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs));
+ estimator.set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
+
+ const std::string kEmptyCadence = "[]";
+ VerifyCadenceVector(&estimator, 24, 60, "[3:2]");
+ VerifyCadenceVector(&estimator, NTSC(24), 60, "[3:2]");
+
+ VerifyCadenceVector(&estimator, 25, 60, kEmptyCadence);
+ VerifyCadenceVector(&estimator, NTSC(30), 60, "[2]");
+ VerifyCadenceVector(&estimator, 30, 60, "[2]");
+ VerifyCadenceVector(&estimator, 50, 60, kEmptyCadence);
+ VerifyCadenceVector(&estimator, NTSC(60), 60, "[1]");
+ VerifyCadenceVector(&estimator, 120, 60, "[1:0]");
+ VerifyCadenceVector(&estimator, 120, 24, "[1:0:0:0:0]");
+
+ // 50Hz is common in the EU.
+ VerifyCadenceVector(&estimator, NTSC(24), 50, kEmptyCadence);
+ VerifyCadenceVector(&estimator, 24, 50, kEmptyCadence);
+ VerifyCadenceVector(&estimator, NTSC(25), 50, "[2]");
+ VerifyCadenceVector(&estimator, 25, 50, "[2]");
+ VerifyCadenceVector(&estimator, NTSC(30), 50, kEmptyCadence);
+ VerifyCadenceVector(&estimator, 30, 50, kEmptyCadence);
+ VerifyCadenceVector(&estimator, NTSC(60), 50, kEmptyCadence);
+ VerifyCadenceVector(&estimator, 60, 50, kEmptyCadence);
+
+ VerifyCadenceVector(&estimator, 25, NTSC(60), kEmptyCadence);
+ VerifyCadenceVector(&estimator, 120, NTSC(60), kEmptyCadence);
+ VerifyCadenceVector(&estimator, 1, NTSC(60), "[60]");
+}
+
+TEST(VideoCadenceEstimatorTest, CadenceVariesWithAcceptableDrift) {
+ VideoCadenceEstimator estimator(
+ base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs));
+ estimator.set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
+
+ const base::TimeDelta render_interval = Interval(NTSC(60));
+ const base::TimeDelta frame_interval = Interval(120);
+
+ base::TimeDelta acceptable_drift = frame_interval / 2;
+ EXPECT_FALSE(estimator.UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_FALSE(estimator.has_cadence());
+
+  // Increasing the acceptable drift should result in more permissive
+ // detection of cadence.
+ acceptable_drift = render_interval;
+ EXPECT_TRUE(estimator.UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_TRUE(estimator.has_cadence());
+ EXPECT_EQ("[1:0]", estimator.GetCadenceForTesting());
+}
+
+TEST(VideoCadenceEstimatorTest, CadenceVariesWithAcceptableGlitchTime) {
+ scoped_ptr<VideoCadenceEstimator> estimator(new VideoCadenceEstimator(
+ base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs)));
+ estimator->set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
+
+ const base::TimeDelta render_interval = Interval(NTSC(60));
+ const base::TimeDelta frame_interval = Interval(120);
+ const base::TimeDelta acceptable_drift = frame_interval / 2;
+
+ EXPECT_FALSE(estimator->UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_FALSE(estimator->has_cadence());
+
+  // Decreasing the acceptable glitch time should result in more permissive
+ // detection of cadence.
+ estimator.reset(new VideoCadenceEstimator(base::TimeDelta::FromSeconds(
+ kMinimumAcceptableTimeBetweenGlitchesSecs / 2)));
+ estimator->set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
+ EXPECT_TRUE(estimator->UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_TRUE(estimator->has_cadence());
+ EXPECT_EQ("[1:0]", estimator->GetCadenceForTesting());
+}
+
+TEST(VideoCadenceEstimatorTest, CadenceHystersisPreventsOscillation) {
+ scoped_ptr<VideoCadenceEstimator> estimator(new VideoCadenceEstimator(
+ base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs)));
+
+ const base::TimeDelta render_interval = Interval(30);
+ const base::TimeDelta frame_interval = Interval(60);
+ const base::TimeDelta acceptable_drift = frame_interval / 2;
+ estimator->set_cadence_hysteresis_threshold_for_testing(render_interval * 2);
+
+ // Cadence hysteresis should prevent the cadence from taking effect yet.
+ EXPECT_FALSE(estimator->UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_FALSE(estimator->has_cadence());
+
+  // A second call should exceed cadence hysteresis and take effect.
+ EXPECT_TRUE(estimator->UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_TRUE(estimator->has_cadence());
+
+ // One bad interval shouldn't cause cadence to drop
+ EXPECT_FALSE(estimator->UpdateCadenceEstimate(
+ render_interval, frame_interval * 0.75, acceptable_drift));
+ EXPECT_TRUE(estimator->has_cadence());
+
+ // Resumption of cadence should clear bad interval count.
+ EXPECT_FALSE(estimator->UpdateCadenceEstimate(render_interval, frame_interval,
+ acceptable_drift));
+ EXPECT_TRUE(estimator->has_cadence());
+
+ // So one more bad interval shouldn't cause cadence to drop
+ EXPECT_FALSE(estimator->UpdateCadenceEstimate(
+ render_interval, frame_interval * 0.75, acceptable_drift));
+ EXPECT_TRUE(estimator->has_cadence());
+
+ // Two bad intervals should.
+ EXPECT_TRUE(estimator->UpdateCadenceEstimate(
+ render_interval, frame_interval * 0.75, acceptable_drift));
+ EXPECT_FALSE(estimator->has_cadence());
+}
+
+TEST(VideoCadenceEstimatorTest, TwoFrameCadenceIsActuallyOneFrame) {
+ VideoCadenceEstimator estimator(
+ base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs));
+ estimator.set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
+
+ const base::TimeDelta render_interval =
+ base::TimeDelta::FromMicroseconds(16715);
+ const base::TimeDelta frame_duration =
+ base::TimeDelta::FromMicroseconds(33360);
+
+ EXPECT_TRUE(estimator.UpdateCadenceEstimate(render_interval, frame_duration,
+ frame_duration / 2));
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_decoder_selector_unittest.cc b/chromium/media/filters/video_decoder_selector_unittest.cc
index fdbcaaa0bca..86f4c0eca1c 100644
--- a/chromium/media/filters/video_decoder_selector_unittest.cc
+++ b/chromium/media/filters/video_decoder_selector_unittest.cc
@@ -87,13 +87,6 @@ class VideoDecoderSelectorTest : public ::testing::Test {
void InitializeDecoderSelector(DecryptorCapability decryptor_capability,
int num_decoders) {
- SetDecryptorReadyCB set_decryptor_ready_cb;
- if (decryptor_capability != kNoDecryptor) {
- set_decryptor_ready_cb =
- base::Bind(&VideoDecoderSelectorTest::SetDecryptorReadyCallback,
- base::Unretained(this));
- }
-
if (decryptor_capability == kDecryptOnly ||
decryptor_capability == kDecryptAndDecode) {
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
@@ -116,6 +109,10 @@ class VideoDecoderSelectorTest : public ::testing::Test {
// Set and cancel DecryptorReadyCB but the callback is never fired.
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
.Times(2);
+ } else if (decryptor_capability == kNoDecryptor) {
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .WillRepeatedly(
+ RunCallback<0>(nullptr, base::Bind(&IgnoreCdmAttached)));
}
DCHECK_GE(all_decoders_.size(), static_cast<size_t>(num_decoders));
@@ -123,18 +120,19 @@ class VideoDecoderSelectorTest : public ::testing::Test {
all_decoders_.begin() + num_decoders, all_decoders_.end());
decoder_selector_.reset(new VideoDecoderSelector(
- message_loop_.message_loop_proxy(),
- all_decoders_.Pass(),
- set_decryptor_ready_cb));
+ message_loop_.message_loop_proxy(), all_decoders_.Pass()));
}
void SelectDecoder() {
decoder_selector_->SelectDecoder(
demuxer_stream_.get(),
- false,
+ base::Bind(&VideoDecoderSelectorTest::SetDecryptorReadyCallback,
+ base::Unretained(this)),
base::Bind(&VideoDecoderSelectorTest::MockOnDecoderSelected,
base::Unretained(this)),
base::Bind(&VideoDecoderSelectorTest::FrameReady,
+ base::Unretained(this)),
+ base::Bind(&VideoDecoderSelectorTest::OnWaitingForDecryptionKey,
base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -151,6 +149,10 @@ class VideoDecoderSelectorTest : public ::testing::Test {
NOTREACHED();
}
+ void OnWaitingForDecryptionKey() {
+ NOTREACHED();
+ }
+
// Declare |decoder_selector_| after |demuxer_stream_| and |decryptor_| since
// |demuxer_stream_| and |decryptor_| should outlive |decoder_selector_|.
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
diff --git a/chromium/media/filters/video_frame_scheduler.h b/chromium/media/filters/video_frame_scheduler.h
deleted file mode 100644
index f90726211cd..00000000000
--- a/chromium/media/filters/video_frame_scheduler.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
-#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/time/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class VideoFrame;
-
-// Defines an abstract video frame scheduler that is capable of managing the
-// display of video frames at explicit times.
-class MEDIA_EXPORT VideoFrameScheduler {
- public:
- VideoFrameScheduler() {}
- virtual ~VideoFrameScheduler() {}
-
- enum Reason {
- DISPLAYED, // Frame was displayed.
- DROPPED, // Frame was dropped.
- };
- typedef base::Callback<void(const scoped_refptr<VideoFrame>&, Reason)> DoneCB;
-
- // Schedule |frame| to be displayed at |wall_ticks|, firing |done_cb| when
- // the scheduler has finished with the frame.
- //
- // To avoid reentrancy issues, |done_cb| is run on a separate calling stack.
- virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) = 0;
-
- // Causes the scheduler to cancel any previously scheduled frames.
- //
- // There is no guarantee that |done_cb|'s for previously scheduled frames
- // will not be run. Clients should implement callback tracking/cancellation
- // if they are sensitive to old callbacks being run.
- virtual void Reset() = 0;
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
diff --git a/chromium/media/filters/video_frame_scheduler_impl.cc b/chromium/media/filters/video_frame_scheduler_impl.cc
deleted file mode 100644
index ee06bb1cd96..00000000000
--- a/chromium/media/filters/video_frame_scheduler_impl.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/video_frame_scheduler_impl.h"
-
-#include <list>
-
-#include "base/single_thread_task_runner.h"
-#include "base/time/default_tick_clock.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-VideoFrameSchedulerImpl::VideoFrameSchedulerImpl(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const DisplayCB& display_cb)
- : task_runner_(task_runner),
- display_cb_(display_cb),
- tick_clock_(new base::DefaultTickClock()) {
-}
-
-VideoFrameSchedulerImpl::~VideoFrameSchedulerImpl() {
-}
-
-void VideoFrameSchedulerImpl::ScheduleVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(!frame->end_of_stream());
- pending_frames_.push(PendingFrame(frame, wall_ticks, done_cb));
- ResetTimerIfNecessary();
-}
-
-void VideoFrameSchedulerImpl::Reset() {
- pending_frames_ = PendingFrameQueue();
- timer_.Stop();
-}
-
-void VideoFrameSchedulerImpl::SetTickClockForTesting(
- scoped_ptr<base::TickClock> tick_clock) {
- tick_clock_.swap(tick_clock);
-}
-
-void VideoFrameSchedulerImpl::ResetTimerIfNecessary() {
- if (pending_frames_.empty()) {
- DCHECK(!timer_.IsRunning());
- return;
- }
-
- // Negative times will schedule the callback to run immediately.
- timer_.Stop();
- timer_.Start(FROM_HERE,
- pending_frames_.top().wall_ticks - tick_clock_->NowTicks(),
- base::Bind(&VideoFrameSchedulerImpl::OnTimerFired,
- base::Unretained(this)));
-}
-
-void VideoFrameSchedulerImpl::OnTimerFired() {
- base::TimeTicks now = tick_clock_->NowTicks();
-
- // Move all frames that have reached their deadline into a separate queue.
- std::list<PendingFrame> expired_frames;
- while (!pending_frames_.empty() && pending_frames_.top().wall_ticks <= now) {
- expired_frames.push_back(pending_frames_.top());
- pending_frames_.pop();
- }
-
- // Signal that all frames except for the last one as dropped.
- while (expired_frames.size() > 1) {
- expired_frames.front().done_cb.Run(expired_frames.front().frame, DROPPED);
- expired_frames.pop_front();
- }
-
- // Display the last expired frame.
- if (!expired_frames.empty()) {
- display_cb_.Run(expired_frames.front().frame);
- expired_frames.front().done_cb.Run(expired_frames.front().frame, DISPLAYED);
- expired_frames.pop_front();
- }
-
- ResetTimerIfNecessary();
-}
-
-VideoFrameSchedulerImpl::PendingFrame::PendingFrame(
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb)
- : frame(frame), wall_ticks(wall_ticks), done_cb(done_cb) {
-}
-
-VideoFrameSchedulerImpl::PendingFrame::~PendingFrame() {
-}
-
-bool VideoFrameSchedulerImpl::PendingFrame::operator<(
- const PendingFrame& other) const {
- // Flip the comparison as std::priority_queue<T>::top() returns the largest
- // element.
- //
- // Assume video frames with identical timestamps contain identical content.
- return wall_ticks > other.wall_ticks;
-}
-
-} // namespace media
diff --git a/chromium/media/filters/video_frame_scheduler_impl.h b/chromium/media/filters/video_frame_scheduler_impl.h
deleted file mode 100644
index 6398969f2c0..00000000000
--- a/chromium/media/filters/video_frame_scheduler_impl.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
-#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
-
-#include <queue>
-
-#include "base/memory/ref_counted.h"
-#include "base/timer/timer.h"
-#include "media/filters/video_frame_scheduler.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-class TickClock;
-}
-
-namespace media {
-
-// A scheduler that uses delayed tasks on a task runner for timing the display
-// of video frames.
-//
-// Single threaded. Calls must be on |task_runner|.
-class MEDIA_EXPORT VideoFrameSchedulerImpl : public VideoFrameScheduler {
- public:
- typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> DisplayCB;
-
- // |task_runner| is used for scheduling the delayed tasks.
- // |display_cb| is run when a frame is to be displayed.
- VideoFrameSchedulerImpl(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const DisplayCB& display_cb);
- ~VideoFrameSchedulerImpl() override;
-
- // VideoFrameScheduler implementation.
- void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) override;
- void Reset() override;
-
- void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
-
- private:
- void ResetTimerIfNecessary();
- void OnTimerFired();
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- DisplayCB display_cb_;
- scoped_ptr<base::TickClock> tick_clock_;
- base::OneShotTimer<VideoFrameScheduler> timer_;
-
- struct PendingFrame {
- PendingFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb);
- ~PendingFrame();
-
- // For use with std::priority_queue<T>.
- bool operator<(const PendingFrame& other) const;
-
- scoped_refptr<VideoFrame> frame;
- base::TimeTicks wall_ticks;
- DoneCB done_cb;
- };
- typedef std::priority_queue<PendingFrame> PendingFrameQueue;
- PendingFrameQueue pending_frames_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerImpl);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
diff --git a/chromium/media/filters/video_frame_scheduler_impl_unittest.cc b/chromium/media/filters/video_frame_scheduler_impl_unittest.cc
deleted file mode 100644
index cf5ee0a3d45..00000000000
--- a/chromium/media/filters/video_frame_scheduler_impl_unittest.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop/message_loop.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "media/base/test_helpers.h"
-#include "media/base/video_frame.h"
-#include "media/filters/video_frame_scheduler_impl.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-using testing::_;
-
-// NOTE: millisecond-level resolution is used for times as real delayed tasks
-// are posted. Don't use large values if you want to keep tests running fast.
-class VideoFrameSchedulerImplTest : public testing::Test {
- public:
- VideoFrameSchedulerImplTest()
- : scheduler_(message_loop_.message_loop_proxy(),
- base::Bind(&VideoFrameSchedulerImplTest::OnDisplay,
- base::Unretained(this))),
- tick_clock_(new base::SimpleTestTickClock()) {
- scheduler_.SetTickClockForTesting(scoped_ptr<base::TickClock>(tick_clock_));
- }
-
- virtual ~VideoFrameSchedulerImplTest() {}
-
- MOCK_METHOD1(OnDisplay, void(const scoped_refptr<VideoFrame>&));
- MOCK_METHOD2(OnFrameDone,
- void(const scoped_refptr<VideoFrame>&,
- VideoFrameScheduler::Reason));
-
- void Schedule(const scoped_refptr<VideoFrame>& frame, int64 target_ms) {
- scheduler_.ScheduleVideoFrame(
- frame,
- base::TimeTicks() + base::TimeDelta::FromMilliseconds(target_ms),
- base::Bind(&VideoFrameSchedulerImplTest::OnFrameDone,
- base::Unretained(this)));
- }
-
- void RunUntilTimeHasElapsed(int64 ms) {
- WaitableMessageLoopEvent waiter;
- message_loop_.PostDelayedTask(
- FROM_HERE, waiter.GetClosure(), base::TimeDelta::FromMilliseconds(ms));
- waiter.RunAndWait();
- }
-
- void AdvanceTime(int64 ms) {
- tick_clock_->Advance(base::TimeDelta::FromMilliseconds(ms));
- }
-
- void Reset() {
- scheduler_.Reset();
- }
-
- private:
- base::MessageLoop message_loop_;
- VideoFrameSchedulerImpl scheduler_;
- base::SimpleTestTickClock* tick_clock_; // Owned by |scheduler_|.
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerImplTest);
-};
-
-TEST_F(VideoFrameSchedulerImplTest, ImmediateDisplay) {
- scoped_refptr<VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(frame, 0);
-
- EXPECT_CALL(*this, OnDisplay(frame));
- EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
- RunUntilTimeHasElapsed(0);
-}
-
-TEST_F(VideoFrameSchedulerImplTest, EventualDisplay) {
- scoped_refptr<VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(frame, 10);
-
- // Nothing should happen.
- RunUntilTimeHasElapsed(10);
-
- // Now we should get the frame.
- EXPECT_CALL(*this, OnDisplay(frame));
- EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
- AdvanceTime(10);
- RunUntilTimeHasElapsed(10);
-}
-
-TEST_F(VideoFrameSchedulerImplTest, DroppedFrame) {
- scoped_refptr<VideoFrame> dropped =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- scoped_refptr<VideoFrame> displayed =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(dropped, 10);
- Schedule(displayed, 20);
-
- // The frame past its deadline will get dropped.
- EXPECT_CALL(*this, OnDisplay(displayed));
- EXPECT_CALL(*this, OnFrameDone(dropped, VideoFrameScheduler::DROPPED));
- EXPECT_CALL(*this, OnFrameDone(displayed, VideoFrameScheduler::DISPLAYED));
- AdvanceTime(20);
- RunUntilTimeHasElapsed(20);
-}
-
-TEST_F(VideoFrameSchedulerImplTest, SingleFrameLate) {
- scoped_refptr<VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(frame, 10);
-
- // Despite frame being late it should still get displayed as it's the only
- // one.
- EXPECT_CALL(*this, OnDisplay(frame));
- EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
- AdvanceTime(20);
- RunUntilTimeHasElapsed(20);
-}
-
-TEST_F(VideoFrameSchedulerImplTest, ManyFramesLate) {
- scoped_refptr<VideoFrame> dropped =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- scoped_refptr<VideoFrame> displayed =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(dropped, 10);
- Schedule(displayed, 20);
-
- // Despite both being late, the scheduler should always displays the latest
- // expired frame.
- EXPECT_CALL(*this, OnDisplay(displayed));
- EXPECT_CALL(*this, OnFrameDone(dropped, VideoFrameScheduler::DROPPED));
- EXPECT_CALL(*this, OnFrameDone(displayed, VideoFrameScheduler::DISPLAYED));
- AdvanceTime(30);
- RunUntilTimeHasElapsed(30);
-}
-
-TEST_F(VideoFrameSchedulerImplTest, Reset) {
- scoped_refptr<VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
- Schedule(frame, 10);
-
- // Despite being on time, frame callback isn't run.
- EXPECT_CALL(*this, OnFrameDone(_, _)).Times(0);
- AdvanceTime(10);
- Reset();
- RunUntilTimeHasElapsed(10);
-}
-
-} // namespace media
diff --git a/chromium/media/filters/video_frame_scheduler_proxy.cc b/chromium/media/filters/video_frame_scheduler_proxy.cc
deleted file mode 100644
index 590412e6dca..00000000000
--- a/chromium/media/filters/video_frame_scheduler_proxy.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/video_frame_scheduler_proxy.h"
-
-#include "base/single_thread_task_runner.h"
-#include "media/base/bind_to_current_loop.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-VideoFrameSchedulerProxy::VideoFrameSchedulerProxy(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const scoped_refptr<base::SingleThreadTaskRunner>& scheduler_runner,
- scoped_ptr<VideoFrameScheduler> scheduler)
- : task_runner_(task_runner),
- scheduler_runner_(scheduler_runner),
- scheduler_(scheduler.Pass()),
- weak_factory_(this) {
-}
-
-VideoFrameSchedulerProxy::~VideoFrameSchedulerProxy() {
- scheduler_runner_->DeleteSoon(FROM_HERE, scheduler_.release());
-}
-
-void VideoFrameSchedulerProxy::ScheduleVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- scheduler_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoFrameScheduler::ScheduleVideoFrame,
- base::Unretained(scheduler_.get()),
- frame,
- wall_ticks,
- BindToCurrentLoop(done_cb)));
-}
-
-void VideoFrameSchedulerProxy::Reset() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- scheduler_runner_->PostTask(FROM_HERE,
- base::Bind(&VideoFrameScheduler::Reset,
- base::Unretained(scheduler_.get())));
-}
-
-} // namespace media
diff --git a/chromium/media/filters/video_frame_scheduler_proxy.h b/chromium/media/filters/video_frame_scheduler_proxy.h
deleted file mode 100644
index 1c25ff81ee8..00000000000
--- a/chromium/media/filters/video_frame_scheduler_proxy.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
-#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
-
-#include "base/memory/weak_ptr.h"
-#include "media/filters/video_frame_scheduler.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-}
-
-namespace media {
-
-// Provides a thread-safe proxy for a VideoFrameScheduler. Typical use is to
-// use a real VideoFrameScheduler on the task runner responsible for graphics
-// display and provide a proxy on the task runner responsible for background
-// video decoding.
-class MEDIA_EXPORT VideoFrameSchedulerProxy : public VideoFrameScheduler {
- public:
- // |task_runner| is the runner that this object will be called on.
- // |scheduler_runner| is the runner that |scheduler| will be called on.
- // |scheduler| will be deleted on |scheduler_runner|.
- VideoFrameSchedulerProxy(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const scoped_refptr<base::SingleThreadTaskRunner>& scheduler_runner,
- scoped_ptr<VideoFrameScheduler> scheduler);
- ~VideoFrameSchedulerProxy() override;
-
- // VideoFrameScheduler implementation.
- void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks wall_ticks,
- const DoneCB& done_cb) override;
- void Reset() override;
-
- private:
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> scheduler_runner_;
- scoped_ptr<VideoFrameScheduler> scheduler_;
-
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<VideoFrameSchedulerProxy> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerProxy);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
diff --git a/chromium/media/filters/video_frame_scheduler_unittest.cc b/chromium/media/filters/video_frame_scheduler_unittest.cc
deleted file mode 100644
index 02b64ae8263..00000000000
--- a/chromium/media/filters/video_frame_scheduler_unittest.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/debug/stack_trace.h"
-#include "base/run_loop.h"
-#include "media/base/video_frame.h"
-#include "media/filters/clockless_video_frame_scheduler.h"
-#include "media/filters/test_video_frame_scheduler.h"
-#include "media/filters/video_frame_scheduler_impl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static void DoNothing(const scoped_refptr<VideoFrame>& frame) {
-}
-
-static void CheckForReentrancy(std::string* stack_trace,
- const scoped_refptr<VideoFrame>& frame,
- VideoFrameScheduler::Reason reason) {
- *stack_trace = base::debug::StackTrace().ToString();
- base::MessageLoop::current()->PostTask(FROM_HERE,
- base::MessageLoop::QuitClosure());
-}
-
-// Type parameterized test harness for validating API contract of
-// VideoFrameScheduler implementations.
-//
-// NOTE: C++ requires using "this" for derived class templates when referencing
-// class members.
-template <typename T>
-class VideoFrameSchedulerTest : public testing::Test {
- public:
- VideoFrameSchedulerTest() {}
- virtual ~VideoFrameSchedulerTest() {}
-
- base::MessageLoop message_loop_;
- T scheduler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerTest);
-};
-
-template <>
-VideoFrameSchedulerTest<ClocklessVideoFrameScheduler>::VideoFrameSchedulerTest()
- : scheduler_(base::Bind(&DoNothing)) {
-}
-
-template <>
-VideoFrameSchedulerTest<VideoFrameSchedulerImpl>::VideoFrameSchedulerTest()
- : scheduler_(message_loop_.message_loop_proxy(), base::Bind(&DoNothing)) {
-}
-
-TYPED_TEST_CASE_P(VideoFrameSchedulerTest);
-
-TYPED_TEST_P(VideoFrameSchedulerTest, ScheduleVideoFrameIsntReentrant) {
- scoped_refptr<VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
-
- std::string stack_trace;
- this->scheduler_.ScheduleVideoFrame(
- frame, base::TimeTicks(), base::Bind(&CheckForReentrancy, &stack_trace));
- EXPECT_TRUE(stack_trace.empty()) << "Reentracy detected:\n" << stack_trace;
-}
-
-REGISTER_TYPED_TEST_CASE_P(VideoFrameSchedulerTest,
- ScheduleVideoFrameIsntReentrant);
-
-INSTANTIATE_TYPED_TEST_CASE_P(ClocklessVideoFrameScheduler,
- VideoFrameSchedulerTest,
- ClocklessVideoFrameScheduler);
-INSTANTIATE_TYPED_TEST_CASE_P(VideoFrameSchedulerImpl,
- VideoFrameSchedulerTest,
- VideoFrameSchedulerImpl);
-INSTANTIATE_TYPED_TEST_CASE_P(TestVideoFrameScheduler,
- VideoFrameSchedulerTest,
- TestVideoFrameScheduler);
-
-} // namespace media
diff --git a/chromium/media/filters/video_frame_stream_unittest.cc b/chromium/media/filters/video_frame_stream_unittest.cc
index 3a19e558a80..d35b57b84c6 100644
--- a/chromium/media/filters/video_frame_stream_unittest.cc
+++ b/chromium/media/filters/video_frame_stream_unittest.cc
@@ -5,11 +5,11 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/message_loop/message_loop.h"
+#include "media/base/fake_demuxer_stream.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/filters/decoder_stream.h"
-#include "media/filters/fake_demuxer_stream.h"
#include "media/filters/fake_video_decoder.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,7 +22,7 @@ using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
-static const int kNumConfigs = 3;
+static const int kNumConfigs = 4;
static const int kNumBuffersInOneConfig = 5;
// Use anonymous namespace here to prevent the actions to be defined multiple
@@ -65,25 +65,37 @@ class VideoFrameStreamTest
kNumBuffersInOneConfig,
GetParam().is_encrypted)),
decryptor_(new NiceMock<MockDecryptor>()),
- decoder_(new FakeVideoDecoder(GetParam().decoding_delay,
- GetParam().parallel_decoding)),
+ decoder1_(new FakeVideoDecoder(
+ GetParam().decoding_delay,
+ GetParam().parallel_decoding,
+ base::Bind(&VideoFrameStreamTest::OnBytesDecoded,
+ base::Unretained(this)))),
+ decoder2_(new FakeVideoDecoder(
+ GetParam().decoding_delay,
+ GetParam().parallel_decoding,
+ base::Bind(&VideoFrameStreamTest::OnBytesDecoded,
+ base::Unretained(this)))),
+ decoder3_(new FakeVideoDecoder(
+ GetParam().decoding_delay,
+ GetParam().parallel_decoding,
+ base::Bind(&VideoFrameStreamTest::OnBytesDecoded,
+ base::Unretained(this)))),
+
is_initialized_(false),
num_decoded_frames_(0),
pending_initialize_(false),
pending_read_(false),
pending_reset_(false),
pending_stop_(false),
- total_bytes_decoded_(0),
+ num_decoded_bytes_unreported_(0),
has_no_key_(false) {
ScopedVector<VideoDecoder> decoders;
- decoders.push_back(decoder_);
+ decoders.push_back(decoder1_);
+ decoders.push_back(decoder2_);
+ decoders.push_back(decoder3_);
video_frame_stream_.reset(new VideoFrameStream(
- message_loop_.message_loop_proxy(),
- decoders.Pass(),
- base::Bind(&VideoFrameStreamTest::SetDecryptorReadyCallback,
- base::Unretained(this)),
- new MediaLog()));
+ message_loop_.message_loop_proxy(), decoders.Pass(), new MediaLog()));
// Decryptor can only decrypt (not decrypt-and-decode) so that
// DecryptingDemuxerStream will be used.
@@ -95,11 +107,12 @@ class VideoFrameStreamTest
~VideoFrameStreamTest() {
// Check that the pipeline statistics callback was fired correctly.
- if (decoder_)
- EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
+ EXPECT_EQ(num_decoded_bytes_unreported_, 0);
is_initialized_ = false;
- decoder_ = NULL;
+ decoder1_ = NULL;
+ decoder2_ = NULL;
+ decoder3_ = NULL;
video_frame_stream_.reset();
message_loop_.RunUntilIdle();
@@ -112,9 +125,14 @@ class VideoFrameStreamTest
MOCK_METHOD1(OnNewSpliceBuffer, void(base::TimeDelta));
MOCK_METHOD1(SetDecryptorReadyCallback, void(const media::DecryptorReadyCB&));
MOCK_METHOD1(DecryptorSet, void(bool));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
void OnStatistics(const PipelineStatistics& statistics) {
- total_bytes_decoded_ += statistics.video_bytes_decoded;
+ num_decoded_bytes_unreported_ -= statistics.video_bytes_decoded;
+ }
+
+ void OnBytesDecoded(int count) {
+ num_decoded_bytes_unreported_ += count;
}
void OnInitialized(bool success) {
@@ -124,17 +142,22 @@ class VideoFrameStreamTest
pending_initialize_ = false;
is_initialized_ = success;
- if (!success)
- decoder_ = NULL;
+ if (!success) {
+ decoder1_ = NULL;
+ decoder2_ = NULL;
+ decoder3_ = NULL;
+ }
}
void InitializeVideoFrameStream() {
pending_initialize_ = true;
video_frame_stream_->Initialize(
- demuxer_stream_.get(),
- false,
+ demuxer_stream_.get(), base::Bind(&VideoFrameStreamTest::OnInitialized,
+ base::Unretained(this)),
+ base::Bind(&VideoFrameStreamTest::SetDecryptorReadyCallback,
+ base::Unretained(this)),
base::Bind(&VideoFrameStreamTest::OnStatistics, base::Unretained(this)),
- base::Bind(&VideoFrameStreamTest::OnInitialized,
+ base::Bind(&VideoFrameStreamTest::OnWaitingForDecryptionKey,
base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -153,6 +176,8 @@ class VideoFrameStreamTest
DCHECK_EQ(stream_type, Decryptor::kVideo);
scoped_refptr<DecoderBuffer> decrypted =
DecoderBuffer::CopyFrom(encrypted->data(), encrypted->data_size());
+ if (encrypted->is_key_frame())
+ decrypted->set_is_key_frame(true);
decrypted->set_timestamp(encrypted->timestamp());
decrypted->set_duration(encrypted->duration());
decrypt_cb.Run(Decryptor::kSuccess, decrypted);
@@ -228,6 +253,10 @@ class VideoFrameStreamTest
}
void EnterPendingState(PendingState state) {
+ EnterPendingState(state, decoder1_);
+ }
+
+ void EnterPendingState(PendingState state, FakeVideoDecoder* decoder) {
DCHECK_NE(state, NOT_PENDING);
switch (state) {
case DEMUXER_READ_NORMAL:
@@ -249,6 +278,8 @@ class VideoFrameStreamTest
break;
case DECRYPTOR_NO_KEY:
+ if (GetParam().is_encrypted)
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey());
ExpectDecryptorNotification();
has_no_key_ = true;
ReadOneFrame();
@@ -256,22 +287,22 @@ class VideoFrameStreamTest
case DECODER_INIT:
ExpectDecryptorNotification();
- decoder_->HoldNextInit();
+ decoder->HoldNextInit();
InitializeVideoFrameStream();
break;
case DECODER_REINIT:
- decoder_->HoldNextInit();
+ decoder->HoldNextInit();
ReadUntilPending();
break;
case DECODER_DECODE:
- decoder_->HoldDecode();
+ decoder->HoldDecode();
ReadUntilPending();
break;
case DECODER_RESET:
- decoder_->HoldNextReset();
+ decoder->HoldNextReset();
pending_reset_ = true;
video_frame_stream_->Reset(base::Bind(&VideoFrameStreamTest::OnReset,
base::Unretained(this)));
@@ -285,6 +316,10 @@ class VideoFrameStreamTest
}
void SatisfyPendingCallback(PendingState state) {
+ SatisfyPendingCallback(state, decoder1_);
+ }
+
+ void SatisfyPendingCallback(PendingState state, FakeVideoDecoder* decoder) {
DCHECK_NE(state, NOT_PENDING);
switch (state) {
case DEMUXER_READ_NORMAL:
@@ -300,19 +335,19 @@ class VideoFrameStreamTest
break;
case DECODER_INIT:
- decoder_->SatisfyInit();
+ decoder->SatisfyInit();
break;
case DECODER_REINIT:
- decoder_->SatisfyInit();
+ decoder->SatisfyInit();
break;
case DECODER_DECODE:
- decoder_->SatisfyDecode();
+ decoder->SatisfyDecode();
break;
case DECODER_RESET:
- decoder_->SatisfyReset();
+ decoder->SatisfyReset();
break;
case NOT_PENDING:
@@ -338,6 +373,11 @@ class VideoFrameStreamTest
SatisfyPendingCallback(DECODER_RESET);
}
+ void ReadUntilDecoderReinitialized(FakeVideoDecoder* decoder) {
+ EnterPendingState(DECODER_REINIT, decoder);
+ SatisfyPendingCallback(DECODER_REINIT, decoder);
+ }
+
base::MessageLoop message_loop_;
scoped_ptr<VideoFrameStream> video_frame_stream_;
@@ -345,7 +385,11 @@ class VideoFrameStreamTest
// Use NiceMock since we don't care about most of calls on the decryptor,
// e.g. RegisterNewKeyCB().
scoped_ptr<NiceMock<MockDecryptor> > decryptor_;
- FakeVideoDecoder* decoder_; // Owned by |video_frame_stream_|.
+ // Three decoders are needed to test that decoder fallback can occur more than
+ // once on a config change. They are owned by |video_frame_stream_|.
+ FakeVideoDecoder* decoder1_;
+ FakeVideoDecoder* decoder2_;
+ FakeVideoDecoder* decoder3_;
bool is_initialized_;
int num_decoded_frames_;
@@ -353,7 +397,7 @@ class VideoFrameStreamTest
bool pending_read_;
bool pending_reset_;
bool pending_stop_;
- int total_bytes_decoded_;
+ int num_decoded_bytes_unreported_;
scoped_refptr<VideoFrame> frame_read_;
VideoFrameStream::Status last_read_status_;
@@ -392,6 +436,14 @@ TEST_P(VideoFrameStreamTest, Initialization) {
Initialize();
}
+TEST_P(VideoFrameStreamTest, DecoderInitializationFails) {
+ decoder1_->SimulateFailureToInit();
+ decoder2_->SimulateFailureToInit();
+ decoder3_->SimulateFailureToInit();
+ Initialize();
+ EXPECT_FALSE(is_initialized_);
+}
+
TEST_P(VideoFrameStreamTest, ReadOneFrame) {
Initialize();
Read();
@@ -446,7 +498,7 @@ TEST_P(VideoFrameStreamTest, Read_BlockedDemuxerAndDecoder) {
Initialize();
demuxer_stream_->HoldNextRead();
- decoder_->HoldDecode();
+ decoder1_->HoldDecode();
ReadOneFrame();
EXPECT_TRUE(pending_read_);
@@ -461,7 +513,7 @@ TEST_P(VideoFrameStreamTest, Read_BlockedDemuxerAndDecoder) {
// Always keep one decode request pending.
if (demuxed_buffers > 1) {
- decoder_->SatisfySingleDecode();
+ decoder1_->SatisfySingleDecode();
message_loop_.RunUntilIdle();
}
}
@@ -471,12 +523,12 @@ TEST_P(VideoFrameStreamTest, Read_BlockedDemuxerAndDecoder) {
// Unblocking one decode request should unblock read even when demuxer is
// still blocked.
- decoder_->SatisfySingleDecode();
+ decoder1_->SatisfySingleDecode();
message_loop_.RunUntilIdle();
EXPECT_FALSE(pending_read_);
// Stream should still be blocked on the demuxer after unblocking the decoder.
- decoder_->SatisfyDecode();
+ decoder1_->SatisfyDecode();
ReadUntilPending();
EXPECT_TRUE(pending_read_);
@@ -497,7 +549,7 @@ TEST_P(VideoFrameStreamTest, Read_DuringEndOfStreamDecode) {
return;
Initialize();
- decoder_->HoldDecode();
+ decoder1_->HoldDecode();
// Read all of the frames up to end of stream. Since parallel decoding is
// enabled, the end of stream buffer will be sent to the decoder immediately,
@@ -506,7 +558,7 @@ TEST_P(VideoFrameStreamTest, Read_DuringEndOfStreamDecode) {
for (int frame = 0; frame < kNumBuffersInOneConfig; frame++) {
ReadOneFrame();
while (pending_read_) {
- decoder_->SatisfySingleDecode();
+ decoder1_->SatisfySingleDecode();
message_loop_.RunUntilIdle();
}
}
@@ -517,7 +569,7 @@ TEST_P(VideoFrameStreamTest, Read_DuringEndOfStreamDecode) {
ASSERT_TRUE(pending_read_);
// Satisfy decoding of the end of stream buffer. The read should complete.
- decoder_->SatisfySingleDecode();
+ decoder1_->SatisfySingleDecode();
message_loop_.RunUntilIdle();
ASSERT_FALSE(pending_read_);
EXPECT_EQ(last_read_status_, VideoFrameStream::OK);
@@ -690,6 +742,11 @@ TEST_P(VideoFrameStreamTest, Destroy_AfterConfigChangeRead) {
SatisfyPendingCallback(DEMUXER_READ_CONFIG_CHANGE);
}
+TEST_P(VideoFrameStreamTest, Destroy_DuringDecoderReinitialization) {
+ Initialize();
+ EnterPendingState(DECODER_REINIT);
+}
+
TEST_P(VideoFrameStreamTest, Destroy_DuringNoKeyRead) {
Initialize();
EnterPendingState(DECRYPTOR_NO_KEY);
@@ -727,7 +784,7 @@ TEST_P(VideoFrameStreamTest, Destroy_AfterRead_AfterReset) {
TEST_P(VideoFrameStreamTest, DecoderErrorWhenReading) {
Initialize();
EnterPendingState(DECODER_DECODE);
- decoder_->SimulateError();
+ decoder1_->SimulateError();
message_loop_.RunUntilIdle();
ASSERT_FALSE(pending_read_);
ASSERT_EQ(VideoFrameStream::DECODE_ERROR, last_read_status_);
@@ -736,18 +793,18 @@ TEST_P(VideoFrameStreamTest, DecoderErrorWhenReading) {
TEST_P(VideoFrameStreamTest, DecoderErrorWhenNotReading) {
Initialize();
- decoder_->HoldDecode();
+ decoder1_->HoldDecode();
ReadOneFrame();
EXPECT_TRUE(pending_read_);
// Satisfy decode requests until we get the first frame out.
while (pending_read_) {
- decoder_->SatisfySingleDecode();
+ decoder1_->SatisfySingleDecode();
message_loop_.RunUntilIdle();
}
// Trigger an error in the decoding.
- decoder_->SimulateError();
+ decoder1_->SimulateError();
// The error must surface from Read() as DECODE_ERROR.
while (last_read_status_ == VideoFrameStream::OK) {
@@ -758,4 +815,45 @@ TEST_P(VideoFrameStreamTest, DecoderErrorWhenNotReading) {
EXPECT_EQ(VideoFrameStream::DECODE_ERROR, last_read_status_);
}
+TEST_P(VideoFrameStreamTest, FallbackDecoderSelectedOnFailureToReinitialize) {
+ Initialize();
+ decoder1_->SimulateFailureToInit();
+ ReadUntilDecoderReinitialized(decoder1_);
+ ReadAllFrames();
+}
+
+TEST_P(VideoFrameStreamTest,
+ FallbackDecoderSelectedOnFailureToReinitialize_Twice) {
+ Initialize();
+ decoder1_->SimulateFailureToInit();
+ ReadUntilDecoderReinitialized(decoder1_);
+ ReadOneFrame();
+ decoder2_->SimulateFailureToInit();
+ ReadUntilDecoderReinitialized(decoder2_);
+ ReadAllFrames();
+}
+
+TEST_P(VideoFrameStreamTest, DecodeErrorAfterFallbackDecoderSelectionFails) {
+ Initialize();
+ decoder1_->SimulateFailureToInit();
+ decoder2_->SimulateFailureToInit();
+ decoder3_->SimulateFailureToInit();
+ ReadUntilDecoderReinitialized(decoder1_);
+ // The error will surface from Read() as DECODE_ERROR.
+ while (last_read_status_ == VideoFrameStream::OK) {
+ ReadOneFrame();
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(pending_read_);
+ }
+ EXPECT_EQ(VideoFrameStream::DECODE_ERROR, last_read_status_);
+}
+
+TEST_P(VideoFrameStreamTest, Destroy_DuringFallbackDecoderSelection) {
+ Initialize();
+ decoder1_->SimulateFailureToInit();
+ EnterPendingState(DECODER_REINIT);
+ decoder2_->HoldNextInit();
+ SatisfyPendingCallback(DECODER_REINIT);
+}
+
} // namespace media
diff --git a/chromium/media/filters/video_renderer_algorithm.cc b/chromium/media/filters/video_renderer_algorithm.cc
new file mode 100644
index 00000000000..59b6d51be53
--- /dev/null
+++ b/chromium/media/filters/video_renderer_algorithm.cc
@@ -0,0 +1,689 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/video_renderer_algorithm.h"
+
+#include <algorithm>
+#include <limits>
+
+namespace media {
+
+// The number of frames to store for moving average calculations. Value picked
+// after experimenting with playback of various local media and YouTube clips.
+const int kMovingAverageSamples = 32;
+
+VideoRendererAlgorithm::ReadyFrame::ReadyFrame(
+ const scoped_refptr<VideoFrame>& ready_frame)
+ : frame(ready_frame),
+ has_estimated_end_time(true),
+ ideal_render_count(0),
+ render_count(0),
+ drop_count(0) {
+}
+
+VideoRendererAlgorithm::ReadyFrame::~ReadyFrame() {
+}
+
+bool VideoRendererAlgorithm::ReadyFrame::operator<(
+ const ReadyFrame& other) const {
+ return frame->timestamp() < other.frame->timestamp();
+}
+
+VideoRendererAlgorithm::VideoRendererAlgorithm(
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb)
+ : cadence_estimator_(base::TimeDelta::FromSeconds(
+ kMinimumAcceptableTimeBetweenGlitchesSecs)),
+ wall_clock_time_cb_(wall_clock_time_cb),
+ frame_duration_calculator_(kMovingAverageSamples),
+ frame_dropping_disabled_(false) {
+ DCHECK(!wall_clock_time_cb_.is_null());
+ Reset();
+}
+
+VideoRendererAlgorithm::~VideoRendererAlgorithm() {
+}
+
+scoped_refptr<VideoFrame> VideoRendererAlgorithm::Render(
+ base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ size_t* frames_dropped) {
+ DCHECK_LT(deadline_min, deadline_max);
+
+ if (frame_queue_.empty())
+ return nullptr;
+
+ if (frames_dropped)
+ *frames_dropped = frames_dropped_during_enqueue_;
+ frames_dropped_during_enqueue_ = 0;
+
+ // Once Render() is called |last_frame_index_| has meaning and should thus be
+ // preserved even if better frames come in before it due to out of order
+ // timestamps.
+ have_rendered_frames_ = true;
+
+ // Step 1: Update the current render interval for subroutines.
+ render_interval_ = deadline_max - deadline_min;
+
+ // Step 2: Figure out if any intervals have been skipped since the last call
+ // to Render(). If so, we assume the last frame provided was rendered during
+ // those intervals and adjust its render count appropriately.
+ AccountForMissedIntervals(deadline_min, deadline_max);
+ last_deadline_max_ = deadline_max;
+
+ // Step 3: Update the wall clock timestamps and frame duration estimates for
+ // all frames currently in the |frame_queue_|.
+ if (!UpdateFrameStatistics()) {
+ DVLOG(2) << "Failed to update frame statistics.";
+
+ ReadyFrame& ready_frame = frame_queue_[last_frame_index_];
+ DCHECK(ready_frame.frame);
+
+ // If duration is unknown, we don't have enough frames to make a good guess
+ // about which frame to use, so always choose the first.
+ if (average_frame_duration_ == base::TimeDelta() &&
+ !ready_frame.start_time.is_null()) {
+ ++ready_frame.render_count;
+ }
+
+ return ready_frame.frame;
+ }
+
+ DCHECK_GT(average_frame_duration_, base::TimeDelta());
+
+ base::TimeDelta selected_frame_drift;
+
+ // Step 4: Attempt to find the best frame by cadence.
+ int cadence_overage = 0;
+ const int cadence_frame =
+ FindBestFrameByCadence(first_frame_ ? nullptr : &cadence_overage);
+ int frame_to_render = cadence_frame;
+ if (frame_to_render >= 0) {
+ selected_frame_drift =
+ CalculateAbsoluteDriftForFrame(deadline_min, frame_to_render);
+ }
+
+ // Step 5: If no frame could be found by cadence or the selected frame exceeds
+ // acceptable drift, try to find the best frame by coverage of the deadline.
+ if (frame_to_render < 0 || selected_frame_drift > max_acceptable_drift_) {
+ int second_best_by_coverage = -1;
+ const int best_by_coverage = FindBestFrameByCoverage(
+ deadline_min, deadline_max, &second_best_by_coverage);
+
+ // If the frame was previously selected based on cadence, we're only here
+ // because the drift is too large, so even if the cadence frame has the best
+ // coverage, fallback to the second best by coverage if it has better drift.
+ if (frame_to_render == best_by_coverage && second_best_by_coverage >= 0 &&
+ CalculateAbsoluteDriftForFrame(deadline_min, second_best_by_coverage) <=
+ selected_frame_drift) {
+ frame_to_render = second_best_by_coverage;
+ } else {
+ frame_to_render = best_by_coverage;
+ }
+
+ if (frame_to_render >= 0) {
+ selected_frame_drift =
+ CalculateAbsoluteDriftForFrame(deadline_min, frame_to_render);
+ }
+ }
+
+ // Step 6: If _still_ no frame could be found by coverage, try to choose the
+ // least crappy option based on the drift from the deadline. If we're here the
+ // selection is going to be bad because it means no suitable frame has any
+ // coverage of the deadline interval.
+ if (frame_to_render < 0 || selected_frame_drift > max_acceptable_drift_)
+ frame_to_render = FindBestFrameByDrift(deadline_min, &selected_frame_drift);
+
+ const bool ignored_cadence_frame =
+ cadence_frame >= 0 && frame_to_render != cadence_frame;
+ if (ignored_cadence_frame) {
+ cadence_overage = 0;
+ DVLOG(2) << "Cadence frame overridden by drift: " << selected_frame_drift;
+ }
+
+ last_render_had_glitch_ = selected_frame_drift > max_acceptable_drift_;
+ DVLOG_IF(2, last_render_had_glitch_)
+ << "Frame drift is too far: " << selected_frame_drift.InMillisecondsF()
+ << "ms";
+
+ DCHECK_GE(frame_to_render, 0);
+
+ // Drop some debugging information if a frame had poor cadence.
+ if (cadence_estimator_.has_cadence()) {
+ const ReadyFrame& last_frame_info = frame_queue_[last_frame_index_];
+ if (static_cast<size_t>(frame_to_render) != last_frame_index_ &&
+ last_frame_info.render_count < last_frame_info.ideal_render_count) {
+ last_render_had_glitch_ = true;
+ DVLOG(2) << "Under-rendered frame " << last_frame_info.frame->timestamp()
+ << "; only " << last_frame_info.render_count
+ << " times instead of " << last_frame_info.ideal_render_count;
+ } else if (static_cast<size_t>(frame_to_render) == last_frame_index_ &&
+ last_frame_info.render_count >=
+ last_frame_info.ideal_render_count) {
+ DVLOG(2) << "Over-rendered frame " << last_frame_info.frame->timestamp()
+ << "; rendered " << last_frame_info.render_count + 1
+ << " times instead of " << last_frame_info.ideal_render_count;
+ last_render_had_glitch_ = true;
+ }
+ }
+
+ // Step 7: Drop frames which occur prior to the frame to be rendered. If any
+ // frame has a zero render count it should be reported as dropped.
+ if (frame_to_render > 0) {
+ if (frames_dropped) {
+ for (int i = 0; i < frame_to_render; ++i) {
+ const ReadyFrame& frame = frame_queue_[i];
+ if (frame.render_count != frame.drop_count)
+ continue;
+
+ // If frame dropping is disabled, ignore the results of the algorithm
+ // and return the earliest unrendered frame.
+ if (frame_dropping_disabled_) {
+ frame_to_render = i;
+ break;
+ }
+
+ DVLOG(2) << "Dropping unrendered (or always dropped) frame "
+ << frame.frame->timestamp()
+ << ", wall clock: " << frame.start_time.ToInternalValue()
+ << " (" << frame.render_count << ", " << frame.drop_count
+ << ")";
+ ++(*frames_dropped);
+ if (!cadence_estimator_.has_cadence() || frame.ideal_render_count)
+ last_render_had_glitch_ = true;
+ }
+ }
+
+ // Increment the frame counter for all frames removed after the last
+ // rendered frame.
+ cadence_frame_counter_ += frame_to_render - last_frame_index_;
+ frame_queue_.erase(frame_queue_.begin(),
+ frame_queue_.begin() + frame_to_render);
+ }
+
+ if (last_render_had_glitch_ && !first_frame_) {
+ DVLOG(2) << "Deadline: [" << deadline_min.ToInternalValue() << ", "
+ << deadline_max.ToInternalValue()
+ << "], Interval: " << render_interval_.InMicroseconds()
+ << ", Duration: " << average_frame_duration_.InMicroseconds();
+ }
+
+ // Step 8: Congratulations, the frame selection gauntlet has been passed!
+ last_frame_index_ = 0;
+
+ // If we ended up choosing a frame selected by cadence, carry over the overage
+ // values from the previous frame. Overage is treated as having been
+ // displayed and dropped for each count. If the frame wasn't selected by
+ // cadence, |cadence_overage| will be zero.
+ //
+ // We also don't want to start counting render counts until the first frame
+ // has reached its presentation time; which is considered to be when its
+ // start time is at most |render_interval_| / 2 before |deadline_min|.
+ if (!first_frame_ ||
+ deadline_min >= frame_queue_.front().start_time - render_interval_ / 2) {
+ // Ignore one frame of overage if the last call to Render() ignored the
+ // frame selected by cadence due to drift.
+ if (last_render_ignored_cadence_frame_ && cadence_overage > 0)
+ cadence_overage -= 1;
+
+ last_render_ignored_cadence_frame_ = ignored_cadence_frame;
+ frame_queue_.front().render_count += cadence_overage + 1;
+ frame_queue_.front().drop_count += cadence_overage;
+
+ // Once we reach a glitch in our cadence sequence, reset the base frame
+ // number used for defining the cadence sequence.
+ if (ignored_cadence_frame) {
+ cadence_frame_counter_ = 0;
+ UpdateCadenceForFrames();
+ }
+
+ first_frame_ = false;
+ }
+
+ DCHECK(frame_queue_.front().frame);
+ return frame_queue_.front().frame;
+}
+
+size_t VideoRendererAlgorithm::RemoveExpiredFrames(base::TimeTicks deadline) {
+ // Update |last_deadline_max_| if it's no longer accurate; this should always
+ // be done or EffectiveFramesQueued() may never expire the last frame.
+ if (deadline > last_deadline_max_)
+ last_deadline_max_ = deadline;
+
+ if (!UpdateFrameStatistics() || frame_queue_.size() < 2)
+ return 0;
+
+ DCHECK_GT(average_frame_duration_, base::TimeDelta());
+
+ // Finds and removes all frames which are too old to be used; I.e., the end of
+ // their render interval is further than |max_acceptable_drift_| from the
+ // given |deadline|. We also always expire anything inserted before the last
+ // rendered frame.
+ size_t frames_to_expire = last_frame_index_;
+ const base::TimeTicks minimum_start_time =
+ deadline - max_acceptable_drift_ - average_frame_duration_;
+ for (; frames_to_expire < frame_queue_.size() - 1; ++frames_to_expire) {
+ if (frame_queue_[frames_to_expire].start_time >= minimum_start_time)
+ break;
+ }
+
+ if (!frames_to_expire)
+ return 0;
+
+ cadence_frame_counter_ += frames_to_expire - last_frame_index_;
+ frame_queue_.erase(frame_queue_.begin(),
+ frame_queue_.begin() + frames_to_expire);
+
+ last_frame_index_ = last_frame_index_ > frames_to_expire
+ ? last_frame_index_ - frames_to_expire
+ : 0;
+ return frames_to_expire;
+}
+
+void VideoRendererAlgorithm::OnLastFrameDropped() {
+ // Since compositing is disconnected from the algorithm, the algorithm may be
+ // Reset() in between ticks of the compositor, so discard notifications which
+ // are invalid.
+ //
+ // If frames were expired by RemoveExpiredFrames() this count may be zero when
+ // the OnLastFrameDropped() call comes in.
+ if (!have_rendered_frames_ || frame_queue_.empty() ||
+ !frame_queue_[last_frame_index_].render_count) {
+ return;
+ }
+
+ ++frame_queue_[last_frame_index_].drop_count;
+ DCHECK_LE(frame_queue_[last_frame_index_].drop_count,
+ frame_queue_[last_frame_index_].render_count);
+}
+
+void VideoRendererAlgorithm::Reset() {
+ frames_dropped_during_enqueue_ = last_frame_index_ = 0;
+ have_rendered_frames_ = last_render_had_glitch_ = false;
+ last_deadline_max_ = base::TimeTicks();
+ average_frame_duration_ = render_interval_ = base::TimeDelta();
+ frame_queue_.clear();
+ cadence_estimator_.Reset();
+ frame_duration_calculator_.Reset();
+ first_frame_ = true;
+ cadence_frame_counter_ = 0;
+ last_render_ignored_cadence_frame_ = false;
+
+ // Default to ATSC IS/191 recommendations for maximum acceptable drift before
+ // we have enough frames to base the maximum on frame duration.
+ max_acceptable_drift_ = base::TimeDelta::FromMilliseconds(15);
+}
+
+size_t VideoRendererAlgorithm::EffectiveFramesQueued() const {
+ if (frame_queue_.empty() || average_frame_duration_ == base::TimeDelta() ||
+ last_deadline_max_.is_null()) {
+ return frame_queue_.size();
+ }
+
+ // If we don't have cadence, subtract off any frames which are before
+ // the last rendered frame or are past their expected rendering time.
+ if (!cadence_estimator_.has_cadence()) {
+ size_t expired_frames = last_frame_index_;
+ DCHECK_LT(last_frame_index_, frame_queue_.size());
+ for (; expired_frames < frame_queue_.size(); ++expired_frames) {
+ const ReadyFrame& frame = frame_queue_[expired_frames];
+ if (frame.end_time.is_null() || frame.end_time > last_deadline_max_)
+ break;
+ }
+ return frame_queue_.size() - expired_frames;
+ }
+
+ // Find the first usable frame to start counting from.
+ const int start_index = FindBestFrameByCadence(nullptr);
+ if (start_index < 0)
+ return 0;
+
+ const base::TimeTicks minimum_start_time =
+ last_deadline_max_ - max_acceptable_drift_;
+ size_t renderable_frame_count = 0;
+ for (size_t i = start_index; i < frame_queue_.size(); ++i) {
+ const ReadyFrame& frame = frame_queue_[i];
+ if (frame.render_count < frame.ideal_render_count &&
+ (frame.end_time.is_null() || frame.end_time > minimum_start_time)) {
+ ++renderable_frame_count;
+ }
+ }
+
+ return renderable_frame_count;
+}
+
+void VideoRendererAlgorithm::EnqueueFrame(
+ const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(frame);
+ DCHECK(!frame->end_of_stream());
+
+ ReadyFrame ready_frame(frame);
+ auto it = frame_queue_.empty() ? frame_queue_.end()
+ : std::lower_bound(frame_queue_.begin(),
+ frame_queue_.end(), frame);
+ DCHECK_GE(it - frame_queue_.begin(), 0);
+
+ // Drop any frames inserted before or at the last rendered frame if we've
+ // already rendered any frames.
+ const size_t new_frame_index = it - frame_queue_.begin();
+ if (new_frame_index <= last_frame_index_ && have_rendered_frames_) {
+ DVLOG(2) << "Dropping frame inserted before the last rendered frame.";
+ ++frames_dropped_during_enqueue_;
+ return;
+ }
+
+ // Drop any frames which are less than a millisecond apart in media time (even
+ // those with timestamps matching an already enqueued frame), there's no way
+ // we can reasonably render these frames; it's effectively a 1000fps limit.
+ const base::TimeDelta delta =
+ std::min(new_frame_index < frame_queue_.size()
+ ? frame_queue_[new_frame_index].frame->timestamp() -
+ frame->timestamp()
+ : base::TimeDelta::Max(),
+ new_frame_index > 0
+ ? frame->timestamp() -
+ frame_queue_[new_frame_index - 1].frame->timestamp()
+ : base::TimeDelta::Max());
+ if (delta < base::TimeDelta::FromMilliseconds(1)) {
+ DVLOG(2) << "Dropping frame too close to an already enqueued frame: "
+ << delta.InMicroseconds() << " us";
+ ++frames_dropped_during_enqueue_;
+ return;
+ }
+
+ // The vast majority of cases should always append to the back, but in rare
+ // circumstance we get out of order timestamps, http://crbug.com/386551.
+ frame_queue_.insert(it, ready_frame);
+
+ // Project the current cadence calculations to include the new frame. These
+ // may not be accurate until the next Render() call. These updates are done
+ // to ensure EffectiveFramesQueued() returns a semi-reliable result.
+ if (cadence_estimator_.has_cadence())
+ UpdateCadenceForFrames();
+
+#ifndef NDEBUG
+ // Verify sorted order in debug mode.
+ for (size_t i = 0; i < frame_queue_.size() - 1; ++i) {
+ DCHECK(frame_queue_[i].frame->timestamp() <=
+ frame_queue_[i + 1].frame->timestamp());
+ }
+#endif
+}
+
+void VideoRendererAlgorithm::AccountForMissedIntervals(
+ base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max) {
+ if (last_deadline_max_.is_null() || deadline_min <= last_deadline_max_ ||
+ !have_rendered_frames_) {
+ return;
+ }
+
+ DCHECK_GT(render_interval_, base::TimeDelta());
+ const int64 render_cycle_count =
+ (deadline_min - last_deadline_max_) / render_interval_;
+
+ // In the ideal case this value will be zero.
+ if (!render_cycle_count)
+ return;
+
+ DVLOG(2) << "Missed " << render_cycle_count << " Render() intervals.";
+
+ // Only update render count if the frame was rendered at all; it may not have
+ // been if the frame is at the head because we haven't rendered anything yet
+ // or because previous frames were removed via RemoveExpiredFrames().
+ ReadyFrame& ready_frame = frame_queue_[last_frame_index_];
+ if (!ready_frame.render_count)
+ return;
+
+ // If the frame was never really rendered since it was dropped each attempt,
+ // we need to increase the drop count as well to match the new render count.
+ // Otherwise we won't properly count the frame as dropped when it's discarded.
+ // We always update the render count so FindBestFrameByCadence() can properly
+ // account for potentially over-rendered frames.
+ if (ready_frame.render_count == ready_frame.drop_count)
+ ready_frame.drop_count += render_cycle_count;
+ ready_frame.render_count += render_cycle_count;
+}
+
+bool VideoRendererAlgorithm::UpdateFrameStatistics() {
+ DCHECK(!frame_queue_.empty());
+
+ // Figure out all current ready frame times at once.
+ std::vector<base::TimeDelta> media_timestamps;
+ media_timestamps.reserve(frame_queue_.size());
+ for (const auto& ready_frame : frame_queue_)
+ media_timestamps.push_back(ready_frame.frame->timestamp());
+
+ // If time has stopped, we can bail out early.
+ std::vector<base::TimeTicks> wall_clock_times;
+ if (!wall_clock_time_cb_.Run(media_timestamps, &wall_clock_times))
+ return false;
+
+ // Transfer the converted wall clock times into our frame queue.
+ DCHECK_EQ(wall_clock_times.size(), frame_queue_.size());
+ for (size_t i = 0; i < frame_queue_.size() - 1; ++i) {
+ ReadyFrame& frame = frame_queue_[i];
+ const bool new_sample = frame.has_estimated_end_time;
+ frame.start_time = wall_clock_times[i];
+ frame.end_time = wall_clock_times[i + 1];
+ frame.has_estimated_end_time = false;
+ if (new_sample)
+ frame_duration_calculator_.AddSample(frame.end_time - frame.start_time);
+ }
+ frame_queue_.back().start_time = wall_clock_times.back();
+
+ if (!frame_duration_calculator_.count())
+ return false;
+
+ // Compute |average_frame_duration_|, a moving average of the last few frames;
+ // see kMovingAverageSamples for the exact number.
+ average_frame_duration_ = frame_duration_calculator_.Average();
+
+ // Update the frame end time for the last frame based on the average.
+ frame_queue_.back().end_time =
+ frame_queue_.back().start_time + average_frame_duration_;
+
+ // ITU-R BR.265 recommends a maximum acceptable drift of +/- half of the frame
+ // duration; there are other asymmetric, more lenient measures, that we're
+ // forgoing in favor of simplicity.
+ //
+ // We'll always allow at least 16.66ms of drift since literature suggests it's
+ // well below the floor of detection and is high enough to ensure stability
+ // for 60fps content.
+ max_acceptable_drift_ = std::max(average_frame_duration_ / 2,
+ base::TimeDelta::FromSecondsD(1.0 / 60));
+
+ // If we were called via RemoveExpiredFrames() and Render() was never called,
+ // we may not have a render interval yet.
+ if (render_interval_ == base::TimeDelta())
+ return true;
+
+ const bool cadence_changed = cadence_estimator_.UpdateCadenceEstimate(
+ render_interval_, average_frame_duration_, max_acceptable_drift_);
+
+ // No need to update cadence if there's been no change; cadence will be set
+ // as frames are added to the queue.
+ if (!cadence_changed)
+ return true;
+
+ cadence_frame_counter_ = 0;
+ UpdateCadenceForFrames();
+
+ // Thus far there appears to be no need for special 3:2 considerations, the
+ // smoothness scores seem to naturally fit that pattern based on maximizing
+ // frame coverage.
+ return true;
+}
+
+void VideoRendererAlgorithm::UpdateCadenceForFrames() {
+ for (size_t i = last_frame_index_; i < frame_queue_.size(); ++i) {
+ // It's always okay to adjust the ideal render count, since the cadence
+ // selection method will still count its current render count towards
+ // cadence selection.
+ frame_queue_[i].ideal_render_count =
+ cadence_estimator_.has_cadence()
+ ? cadence_estimator_.GetCadenceForFrame(cadence_frame_counter_ +
+ (i - last_frame_index_))
+ : 0;
+ }
+}
+
+int VideoRendererAlgorithm::FindBestFrameByCadence(
+ int* remaining_overage) const {
+ DCHECK(!frame_queue_.empty());
+ if (!cadence_estimator_.has_cadence())
+ return -1;
+
+ DCHECK(!frame_queue_.empty());
+ DCHECK(cadence_estimator_.has_cadence());
+ const ReadyFrame& current_frame = frame_queue_[last_frame_index_];
+
+ if (remaining_overage) {
+ DCHECK_EQ(*remaining_overage, 0);
+ }
+
+ // If the current frame is below cadence, we should prefer it.
+ if (current_frame.render_count < current_frame.ideal_render_count)
+ return last_frame_index_;
+
+ // For over-rendered frames we need to ensure we skip frames and subtract
+ // each skipped frame's ideal cadence from the over-render count until we
+ // find a frame which still has a positive ideal render count.
+ int render_count_overage = std::max(
+ 0, current_frame.render_count - current_frame.ideal_render_count);
+
+ // If the current frame is on cadence or over cadence, find the next frame
+ // with a positive ideal render count.
+ for (size_t i = last_frame_index_ + 1; i < frame_queue_.size(); ++i) {
+ const ReadyFrame& frame = frame_queue_[i];
+ if (frame.ideal_render_count > render_count_overage) {
+ if (remaining_overage)
+ *remaining_overage = render_count_overage;
+ return i;
+ } else {
+ // The ideal render count should always be zero or smaller than the
+ // over-render count.
+ render_count_overage -= frame.ideal_render_count;
+ DCHECK_GE(render_count_overage, 0);
+ }
+ }
+
+  // We don't have enough frames to find a better one by cadence.
+ return -1;
+}
+
+int VideoRendererAlgorithm::FindBestFrameByCoverage(
+ base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ int* second_best) const {
+ DCHECK(!frame_queue_.empty());
+
+ // Find the frame which covers the most of the interval [deadline_min,
+ // deadline_max]. Frames outside of the interval are considered to have no
+ // coverage, while those which completely overlap the interval have complete
+ // coverage.
+ int best_frame_by_coverage = -1;
+ base::TimeDelta best_coverage;
+ std::vector<base::TimeDelta> coverage(frame_queue_.size(), base::TimeDelta());
+ for (size_t i = last_frame_index_; i < frame_queue_.size(); ++i) {
+ const ReadyFrame& frame = frame_queue_[i];
+
+ // Frames which start after the deadline interval have zero coverage.
+ if (frame.start_time > deadline_max)
+ break;
+
+ // Clamp frame end times to a maximum of |deadline_max|.
+ const base::TimeTicks end_time = std::min(deadline_max, frame.end_time);
+
+ // Frames entirely before the deadline interval have zero coverage.
+ if (end_time < deadline_min)
+ continue;
+
+ // If we're here, the current frame overlaps the deadline in some way; so
+ // compute the duration of the interval which is covered.
+ const base::TimeDelta duration =
+ end_time - std::max(deadline_min, frame.start_time);
+
+ coverage[i] = duration;
+ if (coverage[i] > best_coverage) {
+ best_frame_by_coverage = i;
+ best_coverage = coverage[i];
+ }
+ }
+
+ // Find the second best frame by coverage; done by zeroing the coverage for
+ // the previous best and recomputing the maximum.
+ *second_best = -1;
+ if (best_frame_by_coverage >= 0) {
+ coverage[best_frame_by_coverage] = base::TimeDelta();
+ auto it = std::max_element(coverage.begin(), coverage.end());
+ if (*it > base::TimeDelta())
+ *second_best = it - coverage.begin();
+ }
+
+ // If two frames have coverage within half a millisecond, prefer the earliest
+ // frame as having the best coverage. Value chosen via experimentation to
+ // ensure proper coverage calculation for 24fps in 60Hz where +/- 100us of
+ // jitter is present within the |render_interval_|. At 60Hz this works out to
+ // an allowed jitter of 3%.
+ const base::TimeDelta kAllowableJitter =
+ base::TimeDelta::FromMicroseconds(500);
+ if (*second_best >= 0 && best_frame_by_coverage > *second_best &&
+ (best_coverage - coverage[*second_best]).magnitude() <=
+ kAllowableJitter) {
+ std::swap(best_frame_by_coverage, *second_best);
+ }
+
+ // TODO(dalecurtis): We may want to make a better decision about what to do
+ // when multiple frames have equivalent coverage over an interval. Jitter in
+ // the render interval may result in irregular frame selection which may be
+ // visible to a viewer.
+ //
+ // 23.974fps and 24fps in 60Hz are the most common susceptible rates, so
+ // extensive tests have been added to ensure these cases work properly.
+
+ return best_frame_by_coverage;
+}
+
+int VideoRendererAlgorithm::FindBestFrameByDrift(
+ base::TimeTicks deadline_min,
+ base::TimeDelta* selected_frame_drift) const {
+ DCHECK(!frame_queue_.empty());
+
+ int best_frame_by_drift = -1;
+ *selected_frame_drift = base::TimeDelta::Max();
+
+ for (size_t i = last_frame_index_; i < frame_queue_.size(); ++i) {
+ const base::TimeDelta drift =
+ CalculateAbsoluteDriftForFrame(deadline_min, i);
+ // We use <= here to prefer the latest frame with minimum drift.
+ if (drift <= *selected_frame_drift) {
+ *selected_frame_drift = drift;
+ best_frame_by_drift = i;
+ }
+ }
+
+ return best_frame_by_drift;
+}
+
+base::TimeDelta VideoRendererAlgorithm::CalculateAbsoluteDriftForFrame(
+ base::TimeTicks deadline_min,
+ int frame_index) const {
+ const ReadyFrame& frame = frame_queue_[frame_index];
+ // If the frame lies before the deadline, compute the delta against the end
+ // of the frame's duration.
+ if (frame.end_time < deadline_min)
+ return deadline_min - frame.end_time;
+
+ // If the frame lies after the deadline, compute the delta against the frame's
+ // start time.
+ if (frame.start_time > deadline_min)
+ return frame.start_time - deadline_min;
+
+ // Drift is zero for frames which overlap the deadline interval.
+ DCHECK_GE(deadline_min, frame.start_time);
+ DCHECK_GE(frame.end_time, deadline_min);
+ return base::TimeDelta();
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_renderer_algorithm.h b/chromium/media/filters/video_renderer_algorithm.h
new file mode 100644
index 00000000000..4a3da250815
--- /dev/null
+++ b/chromium/media/filters/video_renderer_algorithm.h
@@ -0,0 +1,312 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_RENDERER_ALGORITHM_H_
+#define MEDIA_FILTERS_VIDEO_RENDERER_ALGORITHM_H_
+
+#include <deque>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/moving_average.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_renderer.h"
+#include "media/filters/video_cadence_estimator.h"
+
+namespace media {
+
+// VideoRendererAlgorithm manages a queue of VideoFrames from which it chooses
+// frames with the goal of providing a smooth playback experience. I.e., the
+// selection process results in the best possible uniformity for displayed frame
+// durations over time.
+//
+// Clients will provide frames to VRA via EnqueueFrame() and then VRA will yield
+// one of those frames in response to a future Render() call. Each Render()
+// call takes a render interval which is used to compute the best frame for
+// display during that interval.
+//
+// Render() calls are expected to happen on a regular basis. Failure to do so
+// will result in suboptimal rendering experiences. If a client knows that
+// Render() callbacks are stalled for any reason, it should tell VRA to expire
+// frames which are unusable via RemoveExpiredFrames(); this prevents useless
+// accumulation of stale VideoFrame objects (which are frequently quite large).
+//
+// The primary means of smooth frame selection is via forced integer cadence,
+// see VideoCadenceEstimator for details on this process. In cases of non-
+// integer cadence, the algorithm will fall back to choosing the frame which
+// covers the most of the current render interval. If no frame covers the
+// current interval, the least bad frame will be chosen based on its drift from
+// the start of the interval.
+//
+// Combined these three approaches enforce optimal smoothness in many cases.
+class MEDIA_EXPORT VideoRendererAlgorithm {
+ public:
+ explicit VideoRendererAlgorithm(
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb);
+ ~VideoRendererAlgorithm();
+
+ // Chooses the best frame for the interval [deadline_min, deadline_max] based
+ // on available and previously rendered frames.
+ //
+ // Under ideal circumstances the deadline interval provided to a Render() call
+ // should be directly adjacent to the deadline given to the previous Render()
+ // call with no overlap or gaps. In practice, |deadline_max| is an estimated
+ // value, which means the next |deadline_min| may overlap it slightly or have
+ // a slight gap. Gaps which exceed the length of the deadline interval are
+ // assumed to be repeated frames for the purposes of cadence detection.
+ //
+ // If provided, |frames_dropped| will be set to the number of frames which
+ // were removed from |frame_queue_|, during this call, which were never
+ // returned during a previous Render() call and are no longer suitable for
+ // rendering since their wall clock time is too far in the past.
+ scoped_refptr<VideoFrame> Render(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ size_t* frames_dropped);
+
+ // Removes all video frames which are unusable since their ideal render
+  // interval [timestamp, timestamp + duration] is farther away from
+ // |deadline_min| than is allowed by drift constraints.
+ //
+ // At least one frame will always remain after this call so that subsequent
+ // Render() calls have a frame to return if no new frames are enqueued before
+ // then. Returns the number of frames removed.
+ //
+ // Note: In cases where there is no known frame duration (i.e. perhaps a video
+ // with only a single frame), the last frame can not be expired, regardless of
+ // the given deadline. Clients must handle this case externally.
+ size_t RemoveExpiredFrames(base::TimeTicks deadline);
+
+ // Clients should call this if the last frame provided by Render() was never
+ // rendered; it ensures the presented cadence matches internal models. This
+ // must be called before the next Render() call.
+ void OnLastFrameDropped();
+
+ // Adds a frame to |frame_queue_| for consideration by Render(). Out of order
+ // timestamps will be sorted into appropriate order. Do not enqueue end of
+ // stream frames. Frames inserted prior to the last rendered frame will not
+ // be used. They will be discarded on the next call to Render(), counting as
+ // dropped frames, or by RemoveExpiredFrames(), counting as expired frames.
+ //
+ // Attempting to enqueue a frame with the same timestamp as a previous frame
+ // will result in the previous frame being replaced if it has not been
+ // rendered yet. If it has been rendered, the new frame will be dropped.
+ void EnqueueFrame(const scoped_refptr<VideoFrame>& frame);
+
+ // Removes all frames from the |frame_queue_| and clears predictors. The
+ // algorithm will be as if freshly constructed after this call.
+ void Reset();
+
+ // Returns the number of frames currently buffered which could be rendered
+ // assuming current Render() interval trends. Before Render() is called, this
+ // will be the same as the number of frames given to EnqueueFrame(). After
+ // Render() has been called, one of two things will be returned:
+ //
+ // If a cadence has been identified, this will return the number of frames
+ // which have a non-zero ideal render count.
+ //
+ // If cadence has not been identified, this will return the number of frames
+ // which have a frame end time greater than the end of the last render
+ // interval passed to Render(). Note: If Render() callbacks become suspended
+  // and the duration is unknown the last frame may never stop counting as
+ // effective. Clients must handle this case externally.
+ //
+ // In either case, frames enqueued before the last displayed frame will not
+ // be counted as effective.
+ size_t EffectiveFramesQueued() const;
+
+ size_t frames_queued() const { return frame_queue_.size(); }
+
+ // Returns the average of the duration of all frames in |frame_queue_|
+ // as measured in wall clock (not media) time.
+ base::TimeDelta average_frame_duration() const {
+ return average_frame_duration_;
+ }
+
+ // Method used for testing which disables frame dropping, in this mode the
+ // algorithm will never drop frames and instead always return every frame
+ // for display at least once.
+ void disable_frame_dropping() { frame_dropping_disabled_ = true; }
+
+ private:
+ friend class VideoRendererAlgorithmTest;
+
+ // The determination of whether to clamp to a given cadence is based on the
+ // number of seconds before a frame would have to be dropped or repeated to
+ // compensate for reaching the maximum acceptable drift.
+ //
+ // We've chosen 8 seconds based on practical observations and the fact that it
+ // allows 29.9fps and 59.94fps in 60Hz and vice versa.
+ //
+ // Most users will not be able to see a single frame repeated or dropped every
+ // 8 seconds and certainly should notice it less than the randomly variable
+ // frame durations.
+ static const int kMinimumAcceptableTimeBetweenGlitchesSecs = 8;
+
+ // Metadata container for enqueued frames. See |frame_queue_| below.
+ struct ReadyFrame {
+ ReadyFrame(const scoped_refptr<VideoFrame>& frame);
+ ~ReadyFrame();
+
+ // For use with std::lower_bound.
+ bool operator<(const ReadyFrame& other) const;
+
+ scoped_refptr<VideoFrame> frame;
+
+ // |start_time| is only available after UpdateFrameStatistics() has been
+ // called and |end_time| only after we have more than one frame.
+ base::TimeTicks start_time;
+ base::TimeTicks end_time;
+
+ // True if this frame's end time is based on the average frame duration and
+ // not the time of the next frame.
+ bool has_estimated_end_time;
+
+ int ideal_render_count;
+ int render_count;
+ int drop_count;
+ };
+
+ // Updates the render count for the last rendered frame based on the number
+ // of missing intervals between Render() calls.
+ void AccountForMissedIntervals(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max);
+
+ // Updates the render count and wall clock timestamps for all frames in
+ // |frame_queue_|. Returns false if statistics can't be updated at this time;
+ // which occurs if media time has stopped or there are not enough frames to
+ // calculate an average frame duration. Updates |cadence_estimator_|.
+ //
+ // Note: Wall clock time is recomputed each Render() call because it's
+ // expected that the TimeSource powering TimeSource::WallClockTimeCB will skew
+ // slightly based on the audio clock.
+ //
+ // TODO(dalecurtis): Investigate how accurate we need the wall clock times to
+ // be, so we can avoid recomputing every time (we would need to recompute when
+ // playback rate changes occur though).
+ bool UpdateFrameStatistics();
+
+ // Updates the ideal render count for all frames in |frame_queue_| based on
+ // the cadence returned by |cadence_estimator_|. Cadence is assigned based
+ // on |frame_counter_|.
+ void UpdateCadenceForFrames();
+
+ // If |cadence_estimator_| has detected a valid cadence, attempts to find the
+ // next frame which should be rendered. Returns -1 if not enough frames are
+ // available for cadence selection or there is no cadence.
+ //
+ // Returns the number of times a prior frame was over displayed and ate into
+  // the returned frame's ideal render count via |remaining_overage|.
+ //
+ // For example, if we have 2 frames and each has an ideal display count of 3,
+ // but the first was displayed 4 times, the best frame is the second one, but
+  // it should only be displayed twice instead of thrice, so its overage is 1.
+ int FindBestFrameByCadence(int* remaining_overage) const;
+
+ // Iterates over |frame_queue_| and finds the frame which covers the most of
+ // the deadline interval. If multiple frames have coverage of the interval,
+ // |second_best| will be set to the index of the frame with the next highest
+ // coverage. Returns -1 if no frame has any coverage of the current interval.
+ //
+ // Prefers the earliest frame if multiple frames have similar coverage (within
+ // a few percent of each other).
+ int FindBestFrameByCoverage(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ int* second_best) const;
+
+  // Iterates over |frame_queue_| and finds the frame which drifts the least from
+ // |deadline_min|. There's always a best frame by drift, so the return value
+ // is always a valid frame index. |selected_frame_drift| will be set to the
+ // drift of the chosen frame.
+ //
+ // Note: Drift calculations assume contiguous frames in the time domain, so
+ // it's not possible to have a case where a frame is -10ms from |deadline_min|
+ // and another frame which is at some time after |deadline_min|. The second
+ // frame would be considered to start at -10ms before |deadline_min| and would
+ // overlap |deadline_min|, so its drift would be zero.
+ int FindBestFrameByDrift(base::TimeTicks deadline_min,
+ base::TimeDelta* selected_frame_drift) const;
+
+ // Calculates the drift from |deadline_min| for the given |frame_index|. If
+ // the [start_time, end_time] lies before |deadline_min| the drift is
+ // the delta between |deadline_min| and |end_time|. If the frame
+ // overlaps |deadline_min| the drift is zero. If the frame lies after
+ // |deadline_min| the drift is the delta between |deadline_min| and
+ // |start_time|.
+ base::TimeDelta CalculateAbsoluteDriftForFrame(base::TimeTicks deadline_min,
+ int frame_index) const;
+
+ // Queue of incoming frames waiting for rendering.
+ using VideoFrameQueue = std::deque<ReadyFrame>;
+ VideoFrameQueue frame_queue_;
+
+ // The index of the last frame rendered; presumed to be the first frame if no
+ // frame has been rendered yet. Updated by Render() and EnqueueFrame() if any
+ // frames are added or removed.
+ //
+ // In most cases this value is zero, but when out of order timestamps are
+ // present, the last rendered frame may be moved.
+ size_t last_frame_index_;
+
+ // Handles cadence detection and frame cadence assignments.
+ VideoCadenceEstimator cadence_estimator_;
+
+ // Indicates if any calls to Render() have successfully yielded a frame yet.
+ bool have_rendered_frames_;
+
+ // Callback used to convert media timestamps into wall clock timestamps.
+ const TimeSource::WallClockTimeCB wall_clock_time_cb_;
+
+ // The last |deadline_max| provided to Render(), used to predict whether
+ // frames were rendered over cadence between Render() calls.
+ base::TimeTicks last_deadline_max_;
+
+ // The average of the duration of all frames in |frame_queue_| as measured in
+ // wall clock (not media) time at the time of the last Render().
+ MovingAverage frame_duration_calculator_;
+ base::TimeDelta average_frame_duration_;
+
+ // The length of the last deadline interval given to Render(), updated at the
+ // start of Render().
+ base::TimeDelta render_interval_;
+
+ // The maximum acceptable drift before a frame can no longer be considered for
+ // rendering within a given interval.
+ base::TimeDelta max_acceptable_drift_;
+
+ // Indicates that the last call to Render() experienced a rendering glitch; it
+ // may have: under-rendered a frame, over-rendered a frame, dropped one or
+ // more frames, or chosen a frame which exceeded acceptable drift.
+ bool last_render_had_glitch_;
+
+ // For testing functionality which enables clockless playback of all frames,
+ // does not prevent frame dropping due to equivalent timestamps.
+ bool frame_dropping_disabled_;
+
+ // Tracks frames dropped during enqueue when identical timestamps are added
+ // to the queue. Callers are told about these frames during Render().
+ size_t frames_dropped_during_enqueue_;
+
+ // When cadence is present, we don't want to start counting against cadence
+ // until the first frame has reached its presentation time.
+ bool first_frame_;
+
+ // The frame number of the last rendered frame; incremented for every frame
+ // rendered and every frame dropped or expired since the last rendered frame.
+ //
+ // Given to |cadence_estimator_| when assigning cadence values to the
+ // ReadyFrameQueue. Cleared when a new cadence is detected.
+ uint64_t cadence_frame_counter_;
+
+ // Tracks whether the last call to Render() choose to ignore the frame chosen
+ // by cadence in favor of one by drift or coverage.
+ bool last_render_ignored_cadence_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoRendererAlgorithm);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_RENDERER_ALGORITHM_H_
diff --git a/chromium/media/filters/video_renderer_algorithm_unittest.cc b/chromium/media/filters/video_renderer_algorithm_unittest.cc
new file mode 100644
index 00000000000..d8b2eb0da8a
--- /dev/null
+++ b/chromium/media/filters/video_renderer_algorithm_unittest.cc
@@ -0,0 +1,1284 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/video_frame_pool.h"
+#include "media/base/wall_clock_time_source.h"
+#include "media/filters/video_renderer_algorithm.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Slows down the given |fps| according to NTSC field reduction standards; see
+// http://en.wikipedia.org/wiki/Frame_rate#Digital_video_and_television
+static double NTSC(double fps) {
+ return fps / 1.001;
+}
+
+// Helper class for generating TimeTicks in a sequence according to a frequency.
+class TickGenerator {
+ public:
+ TickGenerator(base::TimeTicks base_timestamp, double hertz)
+ : tick_count_(0),
+ hertz_(hertz),
+ microseconds_per_tick_(base::Time::kMicrosecondsPerSecond / hertz),
+ base_time_(base_timestamp) {}
+
+ base::TimeDelta interval(int tick_count) const {
+ return base::TimeDelta::FromMicroseconds(tick_count *
+ microseconds_per_tick_);
+ }
+
+ base::TimeTicks current() const { return base_time_ + interval(tick_count_); }
+ base::TimeTicks step() { return step(1); }
+ base::TimeTicks step(int n) {
+ tick_count_ += n;
+ return current();
+ }
+
+ double hertz() const { return hertz_; }
+
+ void Reset(base::TimeTicks base_timestamp) {
+ base_time_ = base_timestamp;
+ tick_count_ = 0;
+ }
+
+ private:
+ // Track a tick count and seconds per tick value to ensure we don't drift too
+ // far due to accumulated errors during testing.
+ int64_t tick_count_;
+ const double hertz_;
+ const double microseconds_per_tick_;
+ base::TimeTicks base_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(TickGenerator);
+};
+
+class VideoRendererAlgorithmTest : public testing::Test {
+ public:
+ VideoRendererAlgorithmTest()
+ : tick_clock_(new base::SimpleTestTickClock()),
+ algorithm_(base::Bind(&WallClockTimeSource::GetWallClockTimes,
+ base::Unretained(&time_source_))) {
+ // Always start the TickClock at a non-zero value since null values have
+ // special connotations.
+ tick_clock_->Advance(base::TimeDelta::FromMicroseconds(10000));
+ time_source_.set_tick_clock_for_testing(tick_clock_.get());
+ }
+ ~VideoRendererAlgorithmTest() override {}
+
+ scoped_refptr<VideoFrame> CreateFrame(base::TimeDelta timestamp) {
+ const gfx::Size natural_size(8, 8);
+ return frame_pool_.CreateFrame(VideoFrame::YV12, natural_size,
+ gfx::Rect(natural_size), natural_size,
+ timestamp);
+ }
+
+ base::TimeDelta minimum_glitch_time() const {
+ return base::TimeDelta::FromSeconds(
+ VideoRendererAlgorithm::kMinimumAcceptableTimeBetweenGlitchesSecs);
+ }
+
+ base::TimeDelta max_acceptable_drift() const {
+ return algorithm_.max_acceptable_drift_;
+ }
+
+ void disable_cadence_hysteresis() {
+ algorithm_.cadence_estimator_.set_cadence_hysteresis_threshold_for_testing(
+ base::TimeDelta());
+ }
+
+ bool last_render_had_glitch() const {
+ return algorithm_.last_render_had_glitch_;
+ }
+
+ bool is_using_cadence() const {
+ return algorithm_.cadence_estimator_.has_cadence();
+ }
+
+ bool IsUsingFractionalCadence() const {
+ return is_using_cadence() &&
+ !algorithm_.cadence_estimator_.GetCadenceForFrame(1);
+ }
+
+ size_t frames_queued() const { return algorithm_.frame_queue_.size(); }
+
+ std::string GetCadence(double frame_rate, double display_rate) {
+ TickGenerator display_tg(tick_clock_->NowTicks(), display_rate);
+ TickGenerator frame_tg(base::TimeTicks(), frame_rate);
+ time_source_.StartTicking();
+
+ // Enqueue enough frames for cadence detection.
+ size_t frames_dropped = 0;
+ disable_cadence_hysteresis();
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(1)));
+ EXPECT_TRUE(RenderAndStep(&display_tg, &frames_dropped));
+
+    // Store cadence before resetting the algorithm.
+ const std::string cadence =
+ algorithm_.cadence_estimator_.GetCadenceForTesting();
+ time_source_.StopTicking();
+ algorithm_.Reset();
+ return cadence;
+ }
+
+ base::TimeDelta CalculateAbsoluteDriftForFrame(base::TimeTicks deadline_min,
+ int frame_index) {
+ return algorithm_.CalculateAbsoluteDriftForFrame(deadline_min, frame_index);
+ }
+
+ bool DriftOfLastRenderWasWithinTolerance(base::TimeTicks deadline_min) {
+ return CalculateAbsoluteDriftForFrame(deadline_min, 0) <=
+ algorithm_.max_acceptable_drift_;
+ }
+
+ scoped_refptr<VideoFrame> RenderAndStep(TickGenerator* tg,
+ size_t* frames_dropped) {
+ const base::TimeTicks start = tg->current();
+ const base::TimeTicks end = tg->step();
+ return algorithm_.Render(start, end, frames_dropped);
+ }
+
+ // Allows tests to run a Render() loop with sufficient frames for the various
+ // rendering modes. Upon each Render() |render_test_func| will be called with
+ // the rendered frame and the number of frames dropped.
+ template <typename OnRenderCallback>
+ void RunFramePumpTest(bool reset,
+ TickGenerator* frame_tg,
+ TickGenerator* display_tg,
+ OnRenderCallback render_test_func) {
+ SCOPED_TRACE(base::StringPrintf("Rendering %.03f fps into %0.03f",
+ frame_tg->hertz(), display_tg->hertz()));
+ tick_clock_->Advance(display_tg->current() - tick_clock_->NowTicks());
+ time_source_.StartTicking();
+
+ const bool fresh_algorithm = !algorithm_.have_rendered_frames_;
+
+ base::TimeDelta last_start_timestamp = kNoTimestamp();
+ bool should_use_cadence = false;
+ int glitch_count = 0;
+ const base::TimeTicks start_time = tick_clock_->NowTicks();
+ while (tick_clock_->NowTicks() - start_time < minimum_glitch_time()) {
+ while (algorithm_.EffectiveFramesQueued() < 3 ||
+ frame_tg->current() - time_source_.CurrentMediaTime() <
+ base::TimeTicks()) {
+ algorithm_.EnqueueFrame(
+ CreateFrame(frame_tg->current() - base::TimeTicks()));
+ frame_tg->step();
+ }
+
+ size_t frames_dropped = 0;
+ const base::TimeTicks deadline_min = display_tg->current();
+ const base::TimeTicks deadline_max = display_tg->step();
+ scoped_refptr<VideoFrame> frame =
+ algorithm_.Render(deadline_min, deadline_max, &frames_dropped);
+
+ render_test_func(frame, frames_dropped);
+ tick_clock_->Advance(display_tg->current() - tick_clock_->NowTicks());
+
+ if (HasFatalFailure())
+ return;
+
+ // Render() should always return a frame within drift tolerances.
+ ASSERT_TRUE(DriftOfLastRenderWasWithinTolerance(deadline_min));
+
+ // If we have a frame, the timestamps should always be monotonically
+ // increasing.
+ if (frame) {
+ if (last_start_timestamp != kNoTimestamp())
+ ASSERT_LE(last_start_timestamp, frame->timestamp());
+ else
+ last_start_timestamp = frame->timestamp();
+ }
+
+ // Only verify certain properties for fresh instances.
+ if (fresh_algorithm) {
+ ASSERT_NEAR(frame_tg->interval(1).InMicroseconds(),
+ algorithm_.average_frame_duration().InMicroseconds(), 1);
+
+ if (is_using_cadence() && last_render_had_glitch())
+ ++glitch_count;
+
+ // Once cadence starts, it should never stop for the current set of
+ // tests.
+ if (is_using_cadence())
+ should_use_cadence = true;
+ ASSERT_EQ(is_using_cadence(), should_use_cadence);
+ }
+
+ // When there are no frames, we're not using cadence based selection, or a
+ // frame is under cadence the two queue size reports should be equal to
+ // the number of usable frames; i.e. those frames whose end time was not
+ // within the last render interval.
+ if (!is_using_cadence() || !frames_queued() ||
+ GetCurrentFrameDisplayCount() < GetCurrentFrameIdealDisplayCount()) {
+ ASSERT_NEAR(GetUsableFrameCount(deadline_max),
+ algorithm_.EffectiveFramesQueued(),
+ fresh_algorithm ? 0 : 1);
+ } else if (is_using_cadence() && !IsUsingFractionalCadence()) {
+ // If there was no glitch in the last render, the two queue sizes should
+ // be off by exactly one frame; i.e., the current frame doesn't count.
+ if (!last_render_had_glitch() && fresh_algorithm)
+ ASSERT_EQ(frames_queued() - 1, algorithm_.EffectiveFramesQueued());
+ } else if (IsUsingFractionalCadence()) {
+ // The frame estimate should be off by at most one frame.
+ const size_t estimated_frames_queued =
+ frames_queued() /
+ algorithm_.cadence_estimator_.cadence_size_for_testing();
+ ASSERT_NEAR(algorithm_.EffectiveFramesQueued(), estimated_frames_queued,
+ 1);
+ }
+ }
+
+ // When using cadence, the glitch count should be at most one for when
+ // rendering for the less than minimum_glitch_time().
+ if (fresh_algorithm && is_using_cadence())
+ ASSERT_LE(glitch_count, 1);
+
+ time_source_.StopTicking();
+ if (reset) {
+ algorithm_.Reset();
+ time_source_.SetMediaTime(base::TimeDelta());
+ }
+ }
+
+ int FindBestFrameByCoverage(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ int* second_best) {
+ return algorithm_.FindBestFrameByCoverage(deadline_min, deadline_max,
+ second_best);
+ }
+
+ int FindBestFrameByDrift(base::TimeTicks deadline_min,
+ base::TimeDelta* selected_frame_drift) {
+ return algorithm_.FindBestFrameByDrift(deadline_min, selected_frame_drift);
+ }
+
+ int GetCurrentFrameDropCount() const {
+ DCHECK_GT(frames_queued(), 0u);
+ return algorithm_.frame_queue_[algorithm_.last_frame_index_].drop_count;
+ }
+
+ int GetCurrentFrameDisplayCount() const {
+ DCHECK_GT(frames_queued(), 0u);
+ return algorithm_.frame_queue_[algorithm_.last_frame_index_].render_count;
+ }
+
+ int GetCurrentFrameIdealDisplayCount() const {
+ DCHECK_GT(frames_queued(), 0u);
+ return algorithm_.frame_queue_[algorithm_.last_frame_index_]
+ .ideal_render_count;
+ }
+
+ int AccountForMissedIntervalsAndStep(TickGenerator* tg) {
+ const base::TimeTicks start = tg->current();
+ const base::TimeTicks end = tg->step();
+ return AccountForMissedIntervals(start, end);
+ }
+
+ int AccountForMissedIntervals(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max) {
+ algorithm_.AccountForMissedIntervals(deadline_min, deadline_max);
+ return frames_queued() ? GetCurrentFrameDisplayCount() : -1;
+ }
+
+ size_t GetUsableFrameCount(base::TimeTicks deadline_max) {
+ if (is_using_cadence())
+ return frames_queued();
+
+ for (size_t i = 0; i < frames_queued(); ++i)
+ if (algorithm_.frame_queue_[i].end_time > deadline_max)
+ return frames_queued() - i;
+ return 0;
+ }
+
+ protected:
+ VideoFramePool frame_pool_;
+ scoped_ptr<base::SimpleTestTickClock> tick_clock_;
+ WallClockTimeSource time_source_;
+ VideoRendererAlgorithm algorithm_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoRendererAlgorithmTest);
+};
+
+TEST_F(VideoRendererAlgorithmTest, Empty) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ size_t frames_dropped = 0;
+ EXPECT_EQ(0u, frames_queued());
+ EXPECT_FALSE(RenderAndStep(&tg, &frames_dropped));
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(0u, frames_queued());
+ EXPECT_NE(base::TimeDelta(), max_acceptable_drift());
+}
+
+TEST_F(VideoRendererAlgorithmTest, Reset) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_NE(base::TimeDelta(), max_acceptable_drift());
+ algorithm_.Reset();
+ EXPECT_EQ(0u, frames_queued());
+ EXPECT_NE(base::TimeDelta(), max_acceptable_drift());
+}
+
+TEST_F(VideoRendererAlgorithmTest, AccountForMissingIntervals) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+
+  // Disable hysteresis since AccountForMissedIntervals() only affects cadence
+  // based rendering.
+ disable_cadence_hysteresis();
+
+ // Simulate Render() called before any frames are present.
+ EXPECT_EQ(-1, AccountForMissedIntervalsAndStep(&tg));
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(3)));
+
+ // Simulate Render() called before any frames have been rendered.
+ EXPECT_EQ(0, AccountForMissedIntervalsAndStep(&tg));
+
+ // Render one frame (several are in the past and will be dropped).
+ base::TimeTicks deadline_min = tg.current();
+ base::TimeTicks deadline_max = tg.step();
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame =
+ algorithm_.Render(deadline_min, deadline_max, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(2), frame->timestamp());
+ EXPECT_EQ(2u, frames_dropped);
+
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+
+  // Now calling AccountForMissedIntervals() with an interval which overlaps
+  // the previous should do nothing.
+ deadline_min += tg.interval(1) / 2;
+ deadline_max += tg.interval(1) / 2;
+ EXPECT_EQ(1, AccountForMissedIntervals(deadline_min, deadline_max));
+
+  // Stepping by 1.5 intervals is not enough to increase the count.
+ deadline_min += tg.interval(1);
+ deadline_max += tg.interval(1);
+ EXPECT_EQ(1, AccountForMissedIntervals(deadline_min, deadline_max));
+
+ // Calling it after a full skipped interval should increase the count by 1 for
+ // each skipped interval.
+ tg.step();
+ EXPECT_EQ(2, AccountForMissedIntervalsAndStep(&tg));
+
+ // 4 because [tg.current(), tg.step()] now represents 2 additional intervals.
+ EXPECT_EQ(4, AccountForMissedIntervalsAndStep(&tg));
+
+ // Frame should be way over cadence and no good frames remain, so last frame
+ // should be returned.
+ frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(3), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+}
+
+TEST_F(VideoRendererAlgorithmTest, OnLastFrameDropped) {
+ TickGenerator frame_tg(base::TimeTicks(), 25);
+ TickGenerator display_tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+
+ // Disable hysteresis since OnLastFrameDropped() only affects cadence based
+ // rendering.
+ disable_cadence_hysteresis();
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(1)));
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(2)));
+
+ // Render one frame (several are in the past and will be dropped).
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ // The frame should have its display count decremented once it's reported as
+ // dropped.
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(0, GetCurrentFrameDropCount());
+ algorithm_.OnLastFrameDropped();
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(1, GetCurrentFrameDropCount());
+
+ // Render the frame again and then force another drop.
+ frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(1, GetCurrentFrameDropCount());
+ algorithm_.OnLastFrameDropped();
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(2, GetCurrentFrameDropCount());
+
+ // The next Render() call should now count this frame as dropped.
+ frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(1), frame->timestamp());
+ EXPECT_EQ(1u, frames_dropped);
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(0, GetCurrentFrameDropCount());
+
+ // Rendering again should result in the same frame being displayed.
+ frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(1), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ // In this case, the drop count is less than the display count, so the frame
+ // should not be counted as dropped.
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(0, GetCurrentFrameDropCount());
+ algorithm_.OnLastFrameDropped();
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(1, GetCurrentFrameDropCount());
+
+ // The third frame should be rendered correctly now and the previous frame not
+ // counted as having been dropped.
+ frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(2), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+}
+
+TEST_F(VideoRendererAlgorithmTest, EffectiveFramesQueued) {
+ TickGenerator frame_tg(base::TimeTicks(), 50);
+ TickGenerator display_tg(tick_clock_->NowTicks(), 25);
+
+ // Disable hysteresis since EffectiveFramesQueued() is tested as part of the
+ // normal frame pump tests when cadence is not present.
+ disable_cadence_hysteresis();
+
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+ time_source_.StartTicking();
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(0)));
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(1)));
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(2)));
+ EXPECT_EQ(3u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(3)));
+ EXPECT_EQ(4u, algorithm_.EffectiveFramesQueued());
+ EXPECT_EQ(4u, frames_queued());
+
+ // Render one frame which will detect cadence...
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ // Fractional cadence should be detected and the count will decrease.
+ ASSERT_TRUE(is_using_cadence());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+ EXPECT_EQ(4u, frames_queued());
+
+ // Dropping the last rendered frame should do nothing, since the last frame
+ // is already excluded from the count if it has a display count of 1.
+ algorithm_.OnLastFrameDropped();
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+}
+
+TEST_F(VideoRendererAlgorithmTest, EffectiveFramesQueuedWithoutCadence) {
+ TickGenerator tg(tick_clock_->NowTicks(), 60);
+
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+ time_source_.StartTicking();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+ EXPECT_EQ(3u, algorithm_.EffectiveFramesQueued());
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(3)));
+ EXPECT_EQ(4u, algorithm_.EffectiveFramesQueued());
+ EXPECT_EQ(4u, frames_queued());
+
+ // Issue a render call that should drop the first two frames and mark the 3rd
+ // as consumed.
+ tg.step(2);
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_FALSE(is_using_cadence());
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(2u, frames_dropped);
+ EXPECT_EQ(tg.interval(2), frame->timestamp());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+ EXPECT_EQ(2u, frames_queued());
+
+ // Rendering one more frame should return 0 effective frames queued.
+ frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_FALSE(is_using_cadence());
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(tg.interval(3), frame->timestamp());
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+ EXPECT_EQ(1u, frames_queued());
+}
+
+// The maximum acceptable drift should be updated once we have two frames.
+TEST_F(VideoRendererAlgorithmTest, AcceptableDriftUpdated) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+
+ size_t frames_dropped = 0;
+ const base::TimeDelta original_drift = max_acceptable_drift();
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_TRUE(RenderAndStep(&tg, &frames_dropped));
+ EXPECT_EQ(original_drift, max_acceptable_drift());
+
+ // Time must be ticking to get wall clock times for frames.
+ time_source_.StartTicking();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_TRUE(RenderAndStep(&tg, &frames_dropped));
+ EXPECT_NE(original_drift, max_acceptable_drift());
+}
+
+// Verifies behavior when time stops.
+TEST_F(VideoRendererAlgorithmTest, TimeIsStopped) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+
+ // Prior to rendering the first frame, the algorithm should always return the
+ // first available frame.
+ size_t frames_dropped = 0;
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ EXPECT_EQ(1u, frames_queued());
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ // The same timestamp should be returned after time starts.
+ tick_clock_->Advance(tg.interval(1));
+ time_source_.StartTicking();
+ frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ // Ensure the next suitable frame is vended as time advances.
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+ frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(1), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+
+ // Once time stops ticking, any further frames shouldn't be returned, even if
+ // the interval requested more closely matches.
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+ time_source_.StopTicking();
+ frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(1), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+}
+
+// Verify frames inserted out of order end up in the right spot and are rendered
+// according to the API contract.
+TEST_F(VideoRendererAlgorithmTest, SortedFrameQueue) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+
+ // Ensure frames handed in out of order before time starts ticking are sorted
+ // and returned in the correct order upon Render().
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(3)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+
+ time_source_.StartTicking();
+
+ // The first call should return the earliest frame appended.
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(0u, frames_dropped);
+ EXPECT_EQ(tg.interval(2), frame->timestamp());
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+
+ // Since a frame has already been rendered, queuing this frame and calling
+ // Render() should result in it being dropped; even though it's a better
+ // candidate for the desired interval. The frame is dropped during enqueue so
+ // it won't show up in frames_queued().
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+ frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_dropped);
+ EXPECT_EQ(tg.interval(2), frame->timestamp());
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+}
+
+// Run through integer cadence selection for 1, 2, 3, and 4.
+TEST_F(VideoRendererAlgorithmTest, BestFrameByCadence) {
+ const double kTestRates[][2] = {{60, 60}, {30, 60}, {25, 75}, {25, 100}};
+
+ for (const auto& test_rate : kTestRates) {
+ disable_cadence_hysteresis();
+
+ TickGenerator frame_tg(base::TimeTicks(), test_rate[0]);
+ TickGenerator display_tg(tick_clock_->NowTicks(), test_rate[1]);
+
+ int actual_frame_pattern = 0;
+ const int desired_frame_pattern = test_rate[1] / test_rate[0];
+ scoped_refptr<VideoFrame> current_frame;
+ RunFramePumpTest(
+ true, &frame_tg, &display_tg,
+ [&current_frame, &actual_frame_pattern, desired_frame_pattern, this](
+ const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {
+ ASSERT_TRUE(frame);
+ ASSERT_EQ(0u, frames_dropped);
+
+          // Each frame should display for exactly its desired cadence pattern.
+ if (!current_frame || current_frame == frame) {
+ actual_frame_pattern++;
+ } else {
+ ASSERT_EQ(actual_frame_pattern, desired_frame_pattern);
+ actual_frame_pattern = 1;
+ }
+
+ current_frame = frame;
+ ASSERT_TRUE(is_using_cadence());
+ });
+
+ if (HasFatalFailure())
+ return;
+ }
+}
+
+TEST_F(VideoRendererAlgorithmTest, BestFrameByCadenceOverdisplayed) {
+ TickGenerator frame_tg(base::TimeTicks(), 25);
+ TickGenerator display_tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+ disable_cadence_hysteresis();
+
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(1)));
+
+ // Render frames until we've exhausted available frames and the last frame is
+ // forced to be over displayed.
+ for (int i = 0; i < 5; ++i) {
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame =
+ RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(i < 4 ? i / 2 : 1), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+ }
+
+ // Verify last frame is above cadence (2 in this case)
+ ASSERT_EQ(GetCurrentFrameIdealDisplayCount() + 1,
+ GetCurrentFrameDisplayCount());
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(2)));
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(3)));
+
+ // The next frame should only be displayed once, since the previous one was
+ // over displayed by one frame.
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(2), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ // Enqueuing a new frame should keep the correct cadence values.
+ algorithm_.EnqueueFrame(CreateFrame(frame_tg.interval(4)));
+
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(1, GetCurrentFrameDropCount());
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+
+ frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame_tg.interval(3), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+ ASSERT_EQ(0, GetCurrentFrameDropCount());
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+}
+
+TEST_F(VideoRendererAlgorithmTest, BestFrameByCadenceOverdisplayedForDrift) {
+ // Use 24.94 to ensure drift expires pretty rapidly (8.36s in this case).
+ TickGenerator frame_tg(base::TimeTicks(), 24.94);
+ TickGenerator display_tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+ disable_cadence_hysteresis();
+
+ scoped_refptr<VideoFrame> last_frame;
+ bool have_overdisplayed_frame = false;
+ while (!have_overdisplayed_frame) {
+ while (algorithm_.EffectiveFramesQueued() < 2) {
+ algorithm_.EnqueueFrame(
+ CreateFrame(frame_tg.current() - base::TimeTicks()));
+ frame_tg.step();
+ }
+
+ size_t frames_dropped = 0;
+ last_frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_TRUE(last_frame);
+ ASSERT_TRUE(is_using_cadence());
+ ASSERT_EQ(0u, frames_dropped);
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+ have_overdisplayed_frame = GetCurrentFrameDisplayCount() > 2;
+ }
+
+ ASSERT_TRUE(last_render_had_glitch());
+
+ // We've reached the point where the current frame is over displayed due to
+ // drift, the next frame should resume cadence without accounting for the
+ // overdisplayed frame.
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> next_frame =
+ RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_EQ(0u, frames_dropped);
+ ASSERT_NE(last_frame, next_frame);
+ ASSERT_TRUE(is_using_cadence());
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+ ASSERT_EQ(1, GetCurrentFrameDisplayCount());
+ last_frame = next_frame;
+
+ next_frame = RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_EQ(0u, frames_dropped);
+ ASSERT_EQ(last_frame, next_frame);
+ ASSERT_TRUE(is_using_cadence());
+ ASSERT_EQ(2, GetCurrentFrameIdealDisplayCount());
+ ASSERT_EQ(2, GetCurrentFrameDisplayCount());
+}
+
+TEST_F(VideoRendererAlgorithmTest, BestFrameByCoverage) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+
+ base::TimeTicks deadline_min = tg.current();
+ base::TimeTicks deadline_max = deadline_min + tg.interval(1);
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame =
+ algorithm_.Render(deadline_min, deadline_max, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ int second_best = 0;
+
+ // Coverage should be 1 for if the frame overlaps the interval entirely, no
+ // second best should be found.
+ EXPECT_EQ(0,
+ FindBestFrameByCoverage(deadline_min, deadline_max, &second_best));
+ EXPECT_EQ(-1, second_best);
+
+ // 49/51 coverage for frame 0 and frame 1 should be within tolerance such that
+ // the earlier frame should still be chosen.
+ deadline_min = tg.current() + tg.interval(1) / 2 +
+ base::TimeDelta::FromMicroseconds(250);
+ deadline_max = deadline_min + tg.interval(1);
+ EXPECT_EQ(0,
+ FindBestFrameByCoverage(deadline_min, deadline_max, &second_best));
+ EXPECT_EQ(1, second_best);
+
+ // 48/52 coverage should result in the second frame being chosen.
+ deadline_min = tg.current() + tg.interval(1) / 2 +
+ base::TimeDelta::FromMicroseconds(500);
+ deadline_max = deadline_min + tg.interval(1);
+ EXPECT_EQ(1,
+ FindBestFrameByCoverage(deadline_min, deadline_max, &second_best));
+ EXPECT_EQ(0, second_best);
+
+ // Overlapping three frames should choose the one with the most coverage and
+ // the second best should be the earliest frame.
+ deadline_min = tg.current() + tg.interval(1) / 2;
+ deadline_max = deadline_min + tg.interval(2);
+ EXPECT_EQ(1,
+ FindBestFrameByCoverage(deadline_min, deadline_max, &second_best));
+ EXPECT_EQ(0, second_best);
+
+ // Requesting coverage outside of all known frames should return -1 for both
+ // best indices.
+ deadline_min = tg.current() + tg.interval(frames_queued());
+ deadline_max = deadline_min + tg.interval(1);
+ EXPECT_EQ(-1,
+ FindBestFrameByCoverage(deadline_min, deadline_max, &second_best));
+ EXPECT_EQ(-1, second_best);
+}
+
+TEST_F(VideoRendererAlgorithmTest, BestFrameByDriftAndDriftCalculations) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ time_source_.StartTicking();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = algorithm_.Render(
+ tg.current(), tg.current() + tg.interval(1), &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ base::TimeDelta zero_drift, half_drift = tg.interval(1) / 2;
+ base::TimeDelta detected_drift;
+
+ // Frame_0 overlaps the deadline, Frame_1 is a full interval away.
+ base::TimeTicks deadline = tg.current();
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(tg.interval(1), CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(0, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(zero_drift, detected_drift);
+
+ // Frame_0 overlaps the deadline, Frame_1 is a half interval away.
+ deadline += half_drift;
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(half_drift, CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(0, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(zero_drift, detected_drift);
+
+ // Both frames overlap the deadline.
+ deadline += half_drift;
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(1, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(zero_drift, detected_drift);
+
+ // Frame_0 is half an interval away, Frame_1 overlaps the deadline.
+ deadline += half_drift;
+ EXPECT_EQ(half_drift, CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(1, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(zero_drift, detected_drift);
+
+ // Frame_0 is a full interval away, Frame_1 overlaps the deadline.
+ deadline += half_drift;
+ EXPECT_EQ(tg.interval(1), CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(zero_drift, CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(1, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(zero_drift, detected_drift);
+
+ // Both frames are entirely before the deadline.
+ deadline += half_drift;
+ EXPECT_EQ(tg.interval(1) + half_drift,
+ CalculateAbsoluteDriftForFrame(deadline, 0));
+ EXPECT_EQ(half_drift, CalculateAbsoluteDriftForFrame(deadline, 1));
+ EXPECT_EQ(1, FindBestFrameByDrift(deadline, &detected_drift));
+ EXPECT_EQ(half_drift, detected_drift);
+}
+
+// Run through fractional cadence selection for 1/2, 1/3, and 1/4.
+TEST_F(VideoRendererAlgorithmTest, BestFrameByFractionalCadence) {
+ const double kTestRates[][2] = {{120, 60}, {72, 24}, {100, 25}};
+
+ for (const auto& test_rate : kTestRates) {
+ disable_cadence_hysteresis();
+
+ TickGenerator frame_tg(base::TimeTicks(), test_rate[0]);
+ TickGenerator display_tg(tick_clock_->NowTicks(), test_rate[1]);
+
+ const size_t desired_drop_pattern = test_rate[0] / test_rate[1] - 1;
+ scoped_refptr<VideoFrame> current_frame;
+ RunFramePumpTest(
+ true, &frame_tg, &display_tg,
+ [&current_frame, desired_drop_pattern, this](
+ const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {
+ ASSERT_TRUE(frame);
+
+ // The first frame should have zero dropped frames, but each Render()
+ // call after should drop the same number of frames based on the
+ // fractional cadence.
+ if (!current_frame)
+ ASSERT_EQ(0u, frames_dropped);
+ else
+ ASSERT_EQ(desired_drop_pattern, frames_dropped);
+
+ ASSERT_NE(current_frame, frame);
+ ASSERT_TRUE(is_using_cadence());
+ current_frame = frame;
+ });
+
+ if (HasFatalFailure())
+ return;
+ }
+}
+
+// Verify a 3:2 frame pattern for 23.974fps and 24fps in 60Hz.
+TEST_F(VideoRendererAlgorithmTest, FilmCadence) {
+ const double kTestRates[] = {NTSC(24), 24};
+ disable_cadence_hysteresis();
+
+ for (double frame_rate : kTestRates) {
+ scoped_refptr<VideoFrame> current_frame;
+ int actual_frame_pattern = 0, desired_frame_pattern = 3;
+
+ TickGenerator frame_tg(base::TimeTicks(), frame_rate);
+ TickGenerator display_tg(tick_clock_->NowTicks(), 60);
+
+ RunFramePumpTest(
+ true, &frame_tg, &display_tg,
+ [&current_frame, &actual_frame_pattern, &desired_frame_pattern, this](
+ const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {
+ ASSERT_TRUE(frame);
+ ASSERT_EQ(0u, frames_dropped);
+
+ if (!current_frame || current_frame == frame) {
+ actual_frame_pattern++;
+ } else {
+ ASSERT_EQ(actual_frame_pattern, desired_frame_pattern);
+ actual_frame_pattern = 1;
+ desired_frame_pattern = (desired_frame_pattern == 3 ? 2 : 3);
+ }
+
+ current_frame = frame;
+ ASSERT_TRUE(is_using_cadence());
+ });
+
+ if (HasFatalFailure())
+ return;
+ }
+}
+
+// Spot check common display and frame rate pairs for correctness.
+TEST_F(VideoRendererAlgorithmTest, CadenceCalculations) {
+ ASSERT_EQ("[3:2]", GetCadence(24, 60));
+ ASSERT_EQ("[3:2]", GetCadence(NTSC(24), 60));
+ ASSERT_EQ("[]", GetCadence(25, 60));
+ ASSERT_EQ("[2]", GetCadence(NTSC(30), 60));
+ ASSERT_EQ("[2]", GetCadence(30, 60));
+ ASSERT_EQ("[]", GetCadence(50, 60));
+ ASSERT_EQ("[1]", GetCadence(NTSC(60), 60));
+ ASSERT_EQ("[1:0]", GetCadence(120, 60));
+
+ // 50Hz is common in the EU.
+ ASSERT_EQ("[]", GetCadence(NTSC(24), 50));
+ ASSERT_EQ("[]", GetCadence(24, 50));
+ ASSERT_EQ("[2]", GetCadence(NTSC(25), 50));
+ ASSERT_EQ("[2]", GetCadence(25, 50));
+ ASSERT_EQ("[]", GetCadence(NTSC(30), 50));
+ ASSERT_EQ("[]", GetCadence(30, 50));
+ ASSERT_EQ("[]", GetCadence(NTSC(60), 50));
+ ASSERT_EQ("[]", GetCadence(60, 50));
+
+ ASSERT_EQ("[]", GetCadence(25, NTSC(60)));
+ ASSERT_EQ("[1:0]", GetCadence(120, NTSC(60)));
+ ASSERT_EQ("[60]", GetCadence(1, NTSC(60)));
+}
+
+TEST_F(VideoRendererAlgorithmTest, RemoveExpiredFrames) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ time_source_.StartTicking();
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(3)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(4)));
+ EXPECT_EQ(5u, algorithm_.EffectiveFramesQueued());
+
+ tg.step(2);
+ ASSERT_EQ(2u, algorithm_.RemoveExpiredFrames(tg.current()));
+ frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_dropped);
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(3), frame->timestamp());
+
+ // Advance expiry enough that one frame is removed, but one remains and is
+ // still counted as effective.
+ ASSERT_EQ(
+ 1u, algorithm_.RemoveExpiredFrames(tg.current() + tg.interval(1) * 0.9));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ // Advancing expiry once more should mark the frame as ineffective.
+ tg.step();
+ ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+}
+
+TEST_F(VideoRendererAlgorithmTest, RemoveExpiredFramesCadence) {
+ TickGenerator tg(tick_clock_->NowTicks(), 50);
+ disable_cadence_hysteresis();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(1)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(2)));
+
+ ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
+ EXPECT_EQ(3u, algorithm_.EffectiveFramesQueued());
+
+ time_source_.StartTicking();
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> frame = RenderAndStep(&tg, &frames_dropped);
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(tg.interval(0), frame->timestamp());
+ EXPECT_EQ(0u, frames_dropped);
+ ASSERT_TRUE(is_using_cadence());
+ EXPECT_EQ(2u, algorithm_.EffectiveFramesQueued());
+
+ // Advance expiry enough that some frames are removed, but one remains and is
+ // still counted as effective.
+ ASSERT_EQ(2u, algorithm_.RemoveExpiredFrames(tg.current() + tg.interval(1) +
+ max_acceptable_drift() * 1.25));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(1u, algorithm_.EffectiveFramesQueued());
+
+ // Advancing expiry once more should mark the frame as ineffective.
+ tg.step(3);
+ ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(0u, algorithm_.EffectiveFramesQueued());
+}
+
+TEST_F(VideoRendererAlgorithmTest, CadenceBasedTest) {
+ // Common display rates.
+ const double kDisplayRates[] = {
+ NTSC(24),
+ 24,
+ NTSC(25),
+ 25,
+ NTSC(30),
+ 30,
+ 48,
+ NTSC(50),
+ 50,
+ NTSC(60),
+ 60,
+ 75,
+ 120,
+ 144,
+ };
+
+ // List of common frame rate values. Values pulled from local test media,
+ // videostack test matrix, and Wikipedia.
+ const double kTestRates[] = {
+ 1, 10, 12.5, 15, NTSC(24), 24, NTSC(25), 25,
+ NTSC(30), 30, 30.12, 48, NTSC(50), 50, 58.74, NTSC(60),
+ 60, 72, 90, 100, 120, 144, 240, 300,
+ };
+
+ for (double display_rate : kDisplayRates) {
+ for (double frame_rate : kTestRates) {
+ TickGenerator frame_tg(base::TimeTicks(), frame_rate);
+ TickGenerator display_tg(tick_clock_->NowTicks(), display_rate);
+ RunFramePumpTest(
+ true, &frame_tg, &display_tg,
+ [](const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {});
+ if (HasFatalFailure())
+ return;
+ }
+ }
+}
+
+// Rotate through various playback rates and ensure algorithm adapts correctly.
+TEST_F(VideoRendererAlgorithmTest, VariableFrameRateCadence) {
+ TickGenerator frame_tg(base::TimeTicks(), NTSC(30));
+ TickGenerator display_tg(tick_clock_->NowTicks(), 60);
+
+ const double kTestRates[] = {1.0, 2, 0.215, 0.5, 1.0};
+ const bool kTestRateHasCadence[arraysize(kTestRates)] = {
+ true, true, false, true, true};
+
+ for (size_t i = 0; i < arraysize(kTestRates); ++i) {
+ const double playback_rate = kTestRates[i];
+ SCOPED_TRACE(base::StringPrintf("Playback Rate: %.03f", playback_rate));
+ time_source_.SetPlaybackRate(playback_rate);
+ RunFramePumpTest(false, &frame_tg, &display_tg,
+ [this](const scoped_refptr<VideoFrame>& frame,
+ size_t frames_dropped) {});
+ if (HasFatalFailure())
+ return;
+
+ ASSERT_EQ(kTestRateHasCadence[i], is_using_cadence());
+ }
+
+ // TODO(dalecurtis): Is there more we can test here?
+}
+
+// Ensures media which only expresses timestamps in milliseconds, gets the right
+// cadence detection.
+TEST_F(VideoRendererAlgorithmTest, UglyTimestampsHaveCadence) {
+ TickGenerator display_tg(tick_clock_->NowTicks(), 60);
+ time_source_.StartTicking();
+
+ // 59.94fps, timestamp deltas from https://youtu.be/byoLvAo9qjs
+ const int kBadTimestampsMs[] = {
+ 17, 16, 17, 17, 16, 17, 17, 16, 17, 17, 17, 16, 17, 17, 16, 17, 17, 16,
+ 17, 17, 16, 17, 17, 16, 17, 17, 16, 17, 17, 17, 16, 17, 17, 16, 17, 17,
+ 16, 17, 17, 16, 17, 17, 16, 17, 17, 16, 17, 17, 16, 17, 17, 17};
+
+  // Run through ~1.6 seconds worth of frames.
+ bool cadence_detected = false;
+ base::TimeDelta timestamp;
+ for (size_t i = 0; i < arraysize(kBadTimestampsMs) * 2; ++i) {
+ while (algorithm_.EffectiveFramesQueued() < 3) {
+ algorithm_.EnqueueFrame(CreateFrame(timestamp));
+ timestamp += base::TimeDelta::FromMilliseconds(
+ kBadTimestampsMs[i % arraysize(kBadTimestampsMs)]);
+ }
+
+ size_t frames_dropped = 0;
+ RenderAndStep(&display_tg, &frames_dropped);
+ ASSERT_EQ(0u, frames_dropped);
+
+ // Cadence won't be detected immediately on this clip, but it will after
+ // enough frames are encountered; after which it should not drop out of
+ // cadence.
+ if (is_using_cadence())
+ cadence_detected = true;
+
+ if (cadence_detected)
+ ASSERT_TRUE(is_using_cadence());
+ }
+}
+
+TEST_F(VideoRendererAlgorithmTest, EnqueueFrames) {
+ TickGenerator tg(base::TimeTicks(), 50);
+ time_source_.StartTicking();
+
+ EXPECT_EQ(0u, frames_queued());
+ scoped_refptr<VideoFrame> frame_1 = CreateFrame(tg.interval(0));
+ algorithm_.EnqueueFrame(frame_1);
+ EXPECT_EQ(1u, frames_queued());
+
+ // Enqueuing a frame with the same timestamp should always be dropped.
+ scoped_refptr<VideoFrame> frame_2 = CreateFrame(tg.interval(0));
+ algorithm_.EnqueueFrame(frame_2);
+ EXPECT_EQ(1u, frames_queued());
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> rendered_frame =
+ RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(frame_1, rendered_frame);
+
+ // The replaced frame should count as dropped.
+ EXPECT_EQ(1u, frames_dropped);
+
+ // Trying to replace frame_1 with frame_2 should do nothing.
+ algorithm_.EnqueueFrame(frame_2);
+ EXPECT_EQ(1u, frames_queued());
+
+ rendered_frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(frame_1, rendered_frame);
+ EXPECT_EQ(1u, frames_dropped);
+
+ // Trying to add a frame < 1 ms after the last frame should drop the frame.
+ algorithm_.EnqueueFrame(CreateFrame(base::TimeDelta::FromMicroseconds(999)));
+ rendered_frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(frame_1, rendered_frame);
+ EXPECT_EQ(1u, frames_dropped);
+
+ scoped_refptr<VideoFrame> frame_3 = CreateFrame(tg.interval(1));
+ algorithm_.EnqueueFrame(frame_3);
+ EXPECT_EQ(2u, frames_queued());
+
+ // Trying to add a frame < 1 ms before the last frame should drop the frame.
+ algorithm_.EnqueueFrame(
+ CreateFrame(tg.interval(1) - base::TimeDelta::FromMicroseconds(999)));
+ rendered_frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(frame_3, rendered_frame);
+ EXPECT_EQ(1u, frames_dropped);
+}
+
+TEST_F(VideoRendererAlgorithmTest, CadenceForFutureFrames) {
+ TickGenerator tg(base::TimeTicks(), 50);
+ time_source_.StartTicking();
+
+ disable_cadence_hysteresis();
+
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(10)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(11)));
+ algorithm_.EnqueueFrame(CreateFrame(tg.interval(12)));
+ EXPECT_EQ(3u, frames_queued());
+
+ // Call Render() a few times to increment the render count.
+ for (int i = 0; i < 10; ++i) {
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> rendered_frame =
+ RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(3u, frames_queued());
+ EXPECT_EQ(tg.interval(10), rendered_frame->timestamp());
+ ASSERT_TRUE(is_using_cadence());
+ }
+
+  // Add some noise to the tick generator so that our first frame
+ // doesn't line up evenly on a deadline.
+ tg.Reset(tg.current() + base::TimeDelta::FromMilliseconds(5));
+
+ // We're now at the first frame, cadence should be one, so
+ // it should only be displayed once.
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> rendered_frame =
+ RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(3u, frames_queued());
+ EXPECT_EQ(tg.interval(10), rendered_frame->timestamp());
+ ASSERT_TRUE(is_using_cadence());
+
+ // Then the next frame should be displayed.
+ rendered_frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(2u, frames_queued());
+ EXPECT_EQ(tg.interval(11), rendered_frame->timestamp());
+ ASSERT_TRUE(is_using_cadence());
+
+ // Finally the last frame.
+ rendered_frame = RenderAndStep(&tg, &frames_dropped);
+ EXPECT_EQ(1u, frames_queued());
+ EXPECT_EQ(tg.interval(12), rendered_frame->timestamp());
+ ASSERT_TRUE(is_using_cadence());
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_renderer_impl.cc b/chromium/media/filters/video_renderer_impl.cc
deleted file mode 100644
index 1bd3f270ab9..00000000000
--- a/chromium/media/filters/video_renderer_impl.cc
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/video_renderer_impl.h"
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/platform_thread.h"
-#include "media/base/bind_to_current_loop.h"
-#include "media/base/buffers.h"
-#include "media/base/limits.h"
-#include "media/base/pipeline.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-VideoRendererImpl::VideoRendererImpl(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- const PaintCB& paint_cb,
- bool drop_frames,
- const scoped_refptr<MediaLog>& media_log)
- : task_runner_(task_runner),
- video_frame_stream_(new VideoFrameStream(task_runner,
- decoders.Pass(),
- set_decryptor_ready_cb,
- media_log)),
- low_delay_(false),
- received_end_of_stream_(false),
- rendered_end_of_stream_(false),
- frame_available_(&lock_),
- state_(kUninitialized),
- thread_(),
- pending_read_(false),
- drop_frames_(drop_frames),
- buffering_state_(BUFFERING_HAVE_NOTHING),
- paint_cb_(paint_cb),
- last_timestamp_(kNoTimestamp()),
- last_painted_timestamp_(kNoTimestamp()),
- frames_decoded_(0),
- frames_dropped_(0),
- is_shutting_down_(false),
- weak_factory_(this) {
- DCHECK(!paint_cb_.is_null());
-}
-
-VideoRendererImpl::~VideoRendererImpl() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- {
- base::AutoLock auto_lock(lock_);
- is_shutting_down_ = true;
- frame_available_.Signal();
- }
-
- if (!thread_.is_null())
- base::PlatformThread::Join(thread_);
-
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
-
- if (!flush_cb_.is_null())
- base::ResetAndReturn(&flush_cb_).Run();
-}
-
-void VideoRendererImpl::Flush(const base::Closure& callback) {
- DVLOG(1) << __FUNCTION__;
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kPlaying);
- flush_cb_ = callback;
- state_ = kFlushing;
-
- // This is necessary if the |video_frame_stream_| has already seen an end of
- // stream and needs to drain it before flushing it.
- ready_frames_.clear();
- if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
- buffering_state_ = BUFFERING_HAVE_NOTHING;
- buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
- }
- received_end_of_stream_ = false;
- rendered_end_of_stream_ = false;
-
- video_frame_stream_->Reset(
- base::Bind(&VideoRendererImpl::OnVideoFrameStreamResetDone,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
- DVLOG(1) << __FUNCTION__ << "(" << timestamp.InMicroseconds() << ")";
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kFlushed);
- DCHECK(!pending_read_);
- DCHECK(ready_frames_.empty());
- DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
-
- state_ = kPlaying;
- start_timestamp_ = timestamp;
- AttemptRead_Locked();
-}
-
-void VideoRendererImpl::Initialize(DemuxerStream* stream,
- bool low_delay,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK(stream);
- DCHECK_EQ(stream->type(), DemuxerStream::VIDEO);
- DCHECK(!init_cb.is_null());
- DCHECK(!statistics_cb.is_null());
- DCHECK(!buffering_state_cb.is_null());
- DCHECK(!ended_cb.is_null());
- DCHECK(!get_time_cb.is_null());
- DCHECK_EQ(kUninitialized, state_);
-
- low_delay_ = low_delay;
-
- // Always post |init_cb_| because |this| could be destroyed if initialization
- // failed.
- init_cb_ = BindToCurrentLoop(init_cb);
-
- statistics_cb_ = statistics_cb;
- buffering_state_cb_ = buffering_state_cb;
- ended_cb_ = ended_cb;
- error_cb_ = error_cb;
- get_time_cb_ = get_time_cb;
- state_ = kInitializing;
-
- video_frame_stream_->Initialize(
- stream,
- low_delay,
- statistics_cb,
- base::Bind(&VideoRendererImpl::OnVideoFrameStreamInitialized,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kInitializing);
-
- if (!success) {
- state_ = kUninitialized;
- base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- return;
- }
-
- // We're all good! Consider ourselves flushed. (ThreadMain() should never
- // see us in the kUninitialized state).
- // Since we had an initial Preroll(), we consider ourself flushed, because we
- // have not populated any buffers yet.
- state_ = kFlushed;
-
- // Create our video thread.
- CHECK(base::PlatformThread::Create(0, this, &thread_));
-
-#if defined(OS_WIN)
- // Bump up our priority so our sleeping is more accurate.
- // TODO(scherkus): find out if this is necessary, but it seems to help.
- ::SetThreadPriority(thread_.platform_handle(), THREAD_PRIORITY_ABOVE_NORMAL);
-#endif // defined(OS_WIN)
- base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
-}
-
-// PlatformThread::Delegate implementation.
-void VideoRendererImpl::ThreadMain() {
- base::PlatformThread::SetName("CrVideoRenderer");
-
- // The number of milliseconds to idle when we do not have anything to do.
- // Nothing special about the value, other than we're being more OS-friendly
- // than sleeping for 1 millisecond.
- //
- // TODO(scherkus): switch to pure event-driven frame timing instead of this
- // kIdleTimeDelta business http://crbug.com/106874
- const base::TimeDelta kIdleTimeDelta =
- base::TimeDelta::FromMilliseconds(10);
-
- // If we have no frames and haven't painted any frame for certain amount of
- // time, declare BUFFERING_HAVE_NOTHING.
- const base::TimeDelta kTimeToDeclareHaveNothing =
- base::TimeDelta::FromSeconds(3);
-
- for (;;) {
- base::AutoLock auto_lock(lock_);
-
- // Thread exit condition.
- if (is_shutting_down_)
- return;
-
- // Remain idle as long as we're not playing.
- if (state_ != kPlaying || buffering_state_ != BUFFERING_HAVE_ENOUGH) {
- UpdateStatsAndWait_Locked(kIdleTimeDelta);
- continue;
- }
-
- base::TimeDelta now = get_time_cb_.Run();
-
- // Remain idle until we have the next frame ready for rendering.
- if (ready_frames_.empty()) {
- if (received_end_of_stream_) {
- if (!rendered_end_of_stream_) {
- rendered_end_of_stream_ = true;
- task_runner_->PostTask(FROM_HERE, ended_cb_);
- }
- } else if (last_painted_timestamp_ != kNoTimestamp() &&
- now - last_painted_timestamp_ >= kTimeToDeclareHaveNothing) {
- buffering_state_ = BUFFERING_HAVE_NOTHING;
- task_runner_->PostTask(
- FROM_HERE, base::Bind(buffering_state_cb_, BUFFERING_HAVE_NOTHING));
- }
-
- UpdateStatsAndWait_Locked(kIdleTimeDelta);
- continue;
- }
-
- base::TimeDelta target_paint_timestamp = ready_frames_.front()->timestamp();
- base::TimeDelta latest_paint_timestamp;
-
- // Deadline is defined as the duration between this frame and the next
- // frame, using the delta between this frame and the previous frame as the
- // assumption for frame duration.
- //
- // TODO(scherkus): This can be vastly improved. Use a histogram to measure
- // the accuracy of our frame timing code. http://crbug.com/149829
- if (last_timestamp_ == kNoTimestamp()) {
- latest_paint_timestamp = base::TimeDelta::Max();
- } else {
- base::TimeDelta duration = target_paint_timestamp - last_timestamp_;
- latest_paint_timestamp = target_paint_timestamp + duration;
- }
-
- // Remain idle until we've reached our target paint window.
- if (now < target_paint_timestamp) {
- UpdateStatsAndWait_Locked(kIdleTimeDelta);
- continue;
- }
-
- if (now > latest_paint_timestamp && drop_frames_) {
- DropNextReadyFrame_Locked();
- continue;
- }
-
- // Congratulations! You've made it past the video frame timing gauntlet.
- //
- // At this point enough time has passed that the next frame that ready for
- // rendering.
- PaintNextReadyFrame_Locked();
- }
-}
-
-void VideoRendererImpl::PaintNextReadyFrame_Locked() {
- lock_.AssertAcquired();
-
- scoped_refptr<VideoFrame> next_frame = ready_frames_.front();
- ready_frames_.pop_front();
- frames_decoded_++;
-
- last_timestamp_ = next_frame->timestamp();
- last_painted_timestamp_ = next_frame->timestamp();
-
- paint_cb_.Run(next_frame);
-
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
-}
-
-void VideoRendererImpl::DropNextReadyFrame_Locked() {
- TRACE_EVENT0("media", "VideoRendererImpl:frameDropped");
-
- lock_.AssertAcquired();
-
- last_timestamp_ = ready_frames_.front()->timestamp();
- ready_frames_.pop_front();
- frames_decoded_++;
- frames_dropped_++;
-
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
-}
-
-void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
- const scoped_refptr<VideoFrame>& frame) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK_NE(state_, kUninitialized);
- DCHECK_NE(state_, kFlushed);
-
- CHECK(pending_read_);
- pending_read_ = false;
-
- if (status == VideoFrameStream::DECODE_ERROR ||
- status == VideoFrameStream::DECRYPT_ERROR) {
- DCHECK(!frame.get());
- PipelineStatus error = PIPELINE_ERROR_DECODE;
- if (status == VideoFrameStream::DECRYPT_ERROR)
- error = PIPELINE_ERROR_DECRYPT;
- task_runner_->PostTask(FROM_HERE, base::Bind(error_cb_, error));
- return;
- }
-
- // Already-queued VideoFrameStream ReadCB's can fire after various state
- // transitions have happened; in that case just drop those frames immediately.
- if (state_ == kFlushing)
- return;
-
- DCHECK_EQ(state_, kPlaying);
-
- // Can happen when demuxers are preparing for a new Seek().
- if (!frame.get()) {
- DCHECK_EQ(status, VideoFrameStream::DEMUXER_READ_ABORTED);
- return;
- }
-
- if (frame->end_of_stream()) {
- DCHECK(!received_end_of_stream_);
- received_end_of_stream_ = true;
- } else {
- // Maintain the latest frame decoded so the correct frame is displayed after
- // prerolling has completed.
- if (frame->timestamp() <= start_timestamp_)
- ready_frames_.clear();
- AddReadyFrame_Locked(frame);
- }
-
- // Signal buffering state if we've met our conditions for having enough data.
- if (buffering_state_ != BUFFERING_HAVE_ENOUGH && HaveEnoughData_Locked())
- TransitionToHaveEnough_Locked();
-
- // Always request more decoded video if we have capacity. This serves two
- // purposes:
- // 1) Prerolling while paused
- // 2) Keeps decoding going if video rendering thread starts falling behind
- AttemptRead_Locked();
-}
-
-bool VideoRendererImpl::HaveEnoughData_Locked() {
- DCHECK_EQ(state_, kPlaying);
- return received_end_of_stream_ ||
- !video_frame_stream_->CanReadWithoutStalling() ||
- ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames) ||
- (low_delay_ && ready_frames_.size() > 0);
-}
-
-void VideoRendererImpl::TransitionToHaveEnough_Locked() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
-
- if (!ready_frames_.empty()) {
- // Because the clock might remain paused in for an undetermined amount
- // of time (e.g., seeking while paused), paint the first frame.
- PaintNextReadyFrame_Locked();
- }
-
- buffering_state_ = BUFFERING_HAVE_ENOUGH;
- buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
-}
-
-void VideoRendererImpl::AddReadyFrame_Locked(
- const scoped_refptr<VideoFrame>& frame) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- lock_.AssertAcquired();
- DCHECK(!frame->end_of_stream());
-
- ready_frames_.push_back(frame);
- DCHECK_LE(ready_frames_.size(),
- static_cast<size_t>(limits::kMaxVideoFrames));
-
- // Avoid needlessly waking up |thread_| unless playing.
- if (state_ == kPlaying)
- frame_available_.Signal();
-}
-
-void VideoRendererImpl::AttemptRead() {
- base::AutoLock auto_lock(lock_);
- AttemptRead_Locked();
-}
-
-void VideoRendererImpl::AttemptRead_Locked() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- lock_.AssertAcquired();
-
- if (pending_read_ || received_end_of_stream_ ||
- ready_frames_.size() == static_cast<size_t>(limits::kMaxVideoFrames)) {
- return;
- }
-
- switch (state_) {
- case kPlaying:
- pending_read_ = true;
- video_frame_stream_->Read(base::Bind(&VideoRendererImpl::FrameReady,
- weak_factory_.GetWeakPtr()));
- return;
-
- case kUninitialized:
- case kInitializing:
- case kFlushing:
- case kFlushed:
- return;
- }
-}
-
-void VideoRendererImpl::OnVideoFrameStreamResetDone() {
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(kFlushing, state_);
- DCHECK(!pending_read_);
- DCHECK(ready_frames_.empty());
- DCHECK(!received_end_of_stream_);
- DCHECK(!rendered_end_of_stream_);
- DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
-
- state_ = kFlushed;
- last_timestamp_ = kNoTimestamp();
- last_painted_timestamp_ = kNoTimestamp();
- base::ResetAndReturn(&flush_cb_).Run();
-}
-
-void VideoRendererImpl::UpdateStatsAndWait_Locked(
- base::TimeDelta wait_duration) {
- lock_.AssertAcquired();
- DCHECK_GE(frames_decoded_, 0);
- DCHECK_LE(frames_dropped_, frames_decoded_);
-
- if (frames_decoded_) {
- PipelineStatistics statistics;
- statistics.video_frames_decoded = frames_decoded_;
- statistics.video_frames_dropped = frames_dropped_;
- task_runner_->PostTask(FROM_HERE, base::Bind(statistics_cb_, statistics));
-
- frames_decoded_ = 0;
- frames_dropped_ = 0;
- }
-
- frame_available_.TimedWait(wait_duration);
-}
-
-} // namespace media
diff --git a/chromium/media/filters/vp8_bool_decoder.cc b/chromium/media/filters/vp8_bool_decoder.cc
new file mode 100644
index 00000000000..31b20e7bf0c
--- /dev/null
+++ b/chromium/media/filters/vp8_bool_decoder.cc
@@ -0,0 +1,206 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+/*
+ * Copyright (c) 2010, The WebM Project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google, nor the WebM Project, nor the names
+ * of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file is modified from the dboolhuff.{c,h} from the WebM's libvpx
+// project. (http://www.webmproject.org/code)
+// It is used to decode bits from a vp8 stream.
+
+#include <algorithm>
+
+#include "base/numerics/safe_conversions.h"
+#include "media/filters/vp8_bool_decoder.h"
+
+namespace media {
+
+#define VP8_BD_VALUE_BIT \
+ static_cast<int>(sizeof(Vp8BoolDecoder::value_) * CHAR_BIT)
+
+static const int kDefaultProbability = 0x80; // 0x80 / 256 = 0.5
+
+// This is meant to be a large, positive constant that can still be efficiently
+// loaded as an immediate (on platforms like ARM, for example). Even relatively
+// modest values like 100 would work fine.
+#define VP8_LOTS_OF_BITS (0x40000000)
+
+// The number of leading zeros.
+static const unsigned char kVp8Norm[256] = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+Vp8BoolDecoder::Vp8BoolDecoder()
+ : user_buffer_(NULL),
+ user_buffer_end_(NULL),
+ value_(0),
+ count_(-8),
+ range_(255) {
+}
+
+bool Vp8BoolDecoder::Initialize(const uint8_t* data, size_t size) {
+ if (data == NULL || size == 0)
+ return false;
+ user_buffer_start_ = data;
+ user_buffer_ = data;
+ user_buffer_end_ = data + size;
+ value_ = 0;
+ count_ = -8;
+ range_ = 255;
+ return true;
+}
+
+void Vp8BoolDecoder::FillDecoder() {
+ DCHECK(user_buffer_ != NULL);
+ int shift = VP8_BD_VALUE_BIT - CHAR_BIT - (count_ + CHAR_BIT);
+ size_t bytes_left = user_buffer_end_ - user_buffer_;
+ size_t bits_left = bytes_left * CHAR_BIT;
+ int x = static_cast<int>(shift + CHAR_BIT - bits_left);
+ int loop_end = 0;
+
+ if (x >= 0) {
+ count_ += VP8_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left) {
+ while (shift >= loop_end) {
+ count_ += CHAR_BIT;
+ value_ |= static_cast<size_t>(*user_buffer_) << shift;
+ ++user_buffer_;
+ shift -= CHAR_BIT;
+ }
+ }
+}
+
+int Vp8BoolDecoder::ReadBit(int probability) {
+ int bit = 0;
+ size_t split = 1 + (((range_ - 1) * probability) >> 8);
+ if (count_ < 0)
+ FillDecoder();
+ size_t bigsplit = static_cast<size_t>(split) << (VP8_BD_VALUE_BIT - 8);
+
+ if (value_ >= bigsplit) {
+ range_ -= split;
+ value_ -= bigsplit;
+ bit = 1;
+ } else {
+ range_ = split;
+ }
+
+ size_t shift = kVp8Norm[range_];
+ range_ <<= shift;
+ value_ <<= shift;
+ count_ -= shift;
+
+ DCHECK_EQ(1U, (range_ >> 7)); // In the range [128, 255].
+
+ return bit;
+}
+
+bool Vp8BoolDecoder::ReadLiteral(size_t num_bits, int* out) {
+ DCHECK_LE(num_bits, sizeof(int) * CHAR_BIT);
+ *out = 0;
+ for (; num_bits > 0; --num_bits)
+ *out = (*out << 1) | ReadBit(kDefaultProbability);
+ return !OutOfBuffer();
+}
+
+bool Vp8BoolDecoder::ReadBool(bool* out, uint8_t probability) {
+ *out = !!ReadBit(probability);
+ return !OutOfBuffer();
+}
+
+bool Vp8BoolDecoder::ReadBool(bool* out) {
+ return ReadBool(out, kDefaultProbability);
+}
+
+bool Vp8BoolDecoder::ReadLiteralWithSign(size_t num_bits, int* out) {
+ ReadLiteral(num_bits, out);
+ // Read sign.
+ if (ReadBit(kDefaultProbability))
+ *out = -*out;
+ return !OutOfBuffer();
+}
+
+size_t Vp8BoolDecoder::BitOffset() {
+ int bit_count = count_ + 8;
+ if (bit_count > VP8_BD_VALUE_BIT)
+ // Capped at 0 to ignore buffer underrun.
+ bit_count = std::max(0, bit_count - VP8_LOTS_OF_BITS);
+ return (user_buffer_ - user_buffer_start_) * 8 - bit_count;
+}
+
+uint8_t Vp8BoolDecoder::GetRange() {
+ return base::checked_cast<uint8_t>(range_);
+}
+
+uint8_t Vp8BoolDecoder::GetBottom() {
+ if (count_ < 0)
+ FillDecoder();
+ return static_cast<uint8_t>(value_ >> (VP8_BD_VALUE_BIT - 8));
+}
+
+inline bool Vp8BoolDecoder::OutOfBuffer() {
+ // Check if we have reached the end of the buffer.
+ //
+ // Variable |count_| stores the number of bits in the |value_| buffer, minus
+ // 8. The top byte is part of the algorithm and the remainder is buffered to
+ // be shifted into it. So, if |count_| == 8, the top 16 bits of |value_| are
+ // occupied, 8 for the algorithm and 8 in the buffer.
+ //
+ // When reading a byte from the user's buffer, |count_| is filled with 8 and
+ // one byte is filled into the |value_| buffer. When we reach the end of the
+ // data, |count_| is additionally filled with VP8_LOTS_OF_BITS. So when
+ // |count_| == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ return (count_ > VP8_BD_VALUE_BIT) && (count_ < VP8_LOTS_OF_BITS);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vp8_bool_decoder.h b/chromium/media/filters/vp8_bool_decoder.h
new file mode 100644
index 00000000000..cea701b6a83
--- /dev/null
+++ b/chromium/media/filters/vp8_bool_decoder.h
@@ -0,0 +1,133 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+/*
+ * Copyright (c) 2010, The WebM Project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google, nor the WebM Project, nor the names
+ * of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file is modified from the dboolhuff.{c,h} from the WebM's libvpx
+// project. (http://www.webmproject.org/code)
+// It is used to decode bits from a vp8 stream.
+
+#ifndef MEDIA_FILTERS_VP8_BOOL_DECODER_H_
+#define MEDIA_FILTERS_VP8_BOOL_DECODER_H_
+
+#include <sys/types.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A class to decode the VP8's boolean entropy coded stream. It's a variant of
+// arithmetic coding. See RFC 6386 - Chapter 7. Boolean Entropy Decoder.
+class MEDIA_EXPORT Vp8BoolDecoder {
+ public:
+ Vp8BoolDecoder();
+
+ // Initializes the decoder to start decoding |data|, |size| being size
+ // of |data| in bytes. Returns false if |data| is NULL or empty.
+ bool Initialize(const uint8_t* data, size_t size);
+
+ // Reads a boolean from the coded stream. Returns false if it has reached the
+ // end of |data| and failed to read the boolean. The probability of |out| to
+ // be true is |probability| / 256, e.g., when |probability| is 0x80, the
+ // chance is 1/2 (i.e., 0x80 / 256).
+ bool ReadBool(bool* out, uint8_t probability);
+
+ // Reads a boolean from the coded stream with the default probability 1/2.
+ // Returns false if it has reached the end of |data| and failed to read the
+ // boolean.
+ bool ReadBool(bool* out);
+
+ // Reads a "literal", that is, a "num_bits"-wide unsigned value whose bits
+ // come high- to low-order, with each bit encoded at probability 1/2.
+ // Returns false if it has reached the end of |data| and failed to read the
+ // literal.
+ bool ReadLiteral(size_t num_bits, int* out);
+
+ // Reads a literal with sign from the coded stream. This is similar to
+ // the ReadListeral(), it first read a "num_bits"-wide unsigned value, and
+ // then read an extra bit as the sign of the literal. Returns false if it has
+ // reached the end of |data| and failed to read the literal or the sign.
+ // This is different from the "read_signed_literal(d, n)" defined in RFC 6386.
+ bool ReadLiteralWithSign(size_t num_bits, int* out);
+
+ // The following methods are used to get the internal states of the decoder.
+
+ // Returns the bit offset to the current top bit of the coded stream. It is
+ // also the number of bits that have been written in the corresponding
+ // encoding state. More specifically, we have the following constraint:
+ // w + (bottom * S) <= v < w + (bottom + range) * S,
+ // where "w" is for the bits already written,
+ // "v" is for the possible values of the coded number.
+ // "S" is the scale for the current bit position,
+ // i.e., S = pow(2, -(n + 8)), where "n" is the bit number of "w".
+ // BitOffset() returns the bit count of "w", i.e., "n".
+ size_t BitOffset();
+
+ // Gets the "bottom" of the current coded value. See BitOffset() for
+ // more details.
+ uint8_t GetBottom();
+
+ // Gets the "range" of the current coded value. See BitOffset() for
+ // more details.
+ uint8_t GetRange();
+
+ private:
+ // Reads the next bit from the coded stream. The probability of the bit to
+ // be one is |probability| / 256.
+ int ReadBit(int probability);
+
+ // Fills more bits from |user_buffer_| to |value_|. We shall keep at least 8
+ // bits of the current |user_buffer_| in |value_|.
+ void FillDecoder();
+
+ // Returns true iff we have ran out of bits.
+ bool OutOfBuffer();
+
+ const uint8_t* user_buffer_;
+ const uint8_t* user_buffer_start_;
+ const uint8_t* user_buffer_end_;
+ size_t value_;
+ int count_;
+ size_t range_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8BoolDecoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VP8_BOOL_DECODER_H_
diff --git a/chromium/media/filters/vp8_bool_decoder_unittest.cc b/chromium/media/filters/vp8_bool_decoder_unittest.cc
new file mode 100644
index 00000000000..83fdfbce83d
--- /dev/null
+++ b/chromium/media/filters/vp8_bool_decoder_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/vp8_bool_decoder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+const static size_t NUM_BITS_TO_TEST = 100;
+
+namespace {
+
+// 100 zeros with probability of 0x80.
+const uint8_t kDataZerosAndEvenProbabilities[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00};
+
+// 100 ones with probability of 0x80.
+const uint8_t kDataOnesAndEvenProbabilities[] = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xf0, 0x20};
+
+// [0, 1, 0, 1, ..., 1] with probability [0, 1, 2, 3, ..., 99].
+const uint8_t kDataParitiesAndIncreasingProbabilities[] = {
+ 0x00, 0x02, 0x08, 0x31, 0x8e, 0xca, 0xab, 0xe2, 0xc8, 0x31, 0x12,
+ 0xb3, 0x2c, 0x19, 0x90, 0xc6, 0x6a, 0xeb, 0x17, 0x52, 0x30};
+
+} // namespace
+
+class Vp8BoolDecoderTest : public ::testing::Test {
+ public:
+ Vp8BoolDecoderTest() {}
+
+ protected:
+ // Fixture member, the bool decoder to be tested.
+ Vp8BoolDecoder bd_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Vp8BoolDecoderTest);
+};
+
+#define INITIALIZE(data) ASSERT_TRUE(bd_.Initialize(data, sizeof(data)));
+
+TEST_F(Vp8BoolDecoderTest, DecodeBoolsWithZerosAndEvenProbabilities) {
+ INITIALIZE(kDataZerosAndEvenProbabilities);
+ ASSERT_EQ(0u, bd_.BitOffset());
+ for (size_t i = 0; i < NUM_BITS_TO_TEST; ++i) {
+ bool out = true;
+ ASSERT_TRUE(bd_.ReadBool(&out, 0x80));
+ ASSERT_FALSE(out);
+ ASSERT_EQ(i, bd_.BitOffset());
+ }
+}
+
+TEST_F(Vp8BoolDecoderTest, DecodeLiteralsWithZerosAndEvenProbabilities) {
+ INITIALIZE(kDataZerosAndEvenProbabilities);
+
+ int value = 1;
+ ASSERT_TRUE(bd_.ReadLiteral(1, &value));
+ ASSERT_EQ(0, value);
+
+ value = 1;
+ ASSERT_TRUE(bd_.ReadLiteral(32, &value));
+ ASSERT_EQ(0, value);
+
+ value = 1;
+ ASSERT_TRUE(bd_.ReadLiteralWithSign(1, &value));
+ ASSERT_EQ(0, value);
+
+ value = 1;
+ ASSERT_TRUE(bd_.ReadLiteralWithSign(31, &value));
+ ASSERT_EQ(0, value);
+}
+
+TEST_F(Vp8BoolDecoderTest, DecodeBoolsWithOnesAndEvenProbabilities) {
+ INITIALIZE(kDataOnesAndEvenProbabilities);
+
+ ASSERT_EQ(0u, bd_.BitOffset());
+ for (size_t i = 0; i < NUM_BITS_TO_TEST; ++i) {
+ bool out = false;
+ ASSERT_TRUE(bd_.ReadBool(&out, 0x80));
+ ASSERT_TRUE(out);
+ ASSERT_EQ(i + 1, bd_.BitOffset());
+ }
+}
+
+TEST_F(Vp8BoolDecoderTest, DecodeLiteralsWithOnesAndEvenProbabilities) {
+ INITIALIZE(kDataOnesAndEvenProbabilities);
+
+ int value = 0;
+ ASSERT_TRUE(bd_.ReadLiteral(1, &value));
+ EXPECT_EQ(1, value);
+
+ value = 0;
+ ASSERT_TRUE(bd_.ReadLiteral(31, &value));
+ EXPECT_EQ(0x7FFFFFFF, value);
+
+ value = 0;
+ ASSERT_TRUE(bd_.ReadLiteralWithSign(1, &value));
+ EXPECT_EQ(-1, value);
+
+ value = 0;
+ ASSERT_TRUE(bd_.ReadLiteralWithSign(31, &value));
+ EXPECT_EQ(-0x7FFFFFFF, value);
+}
+
+TEST_F(Vp8BoolDecoderTest, DecodeBoolsWithParitiesAndIncreasingProbabilities) {
+ INITIALIZE(kDataParitiesAndIncreasingProbabilities);
+
+ for (size_t i = 0; i < NUM_BITS_TO_TEST; ++i) {
+ bool out = !(i & 1);
+ ASSERT_TRUE(bd_.ReadBool(&out, static_cast<int>(i)));
+ EXPECT_EQ(out, !!(i & 1));
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vp8_parser.cc b/chromium/media/filters/vp8_parser.cc
new file mode 100644
index 00000000000..5b49673d86e
--- /dev/null
+++ b/chromium/media/filters/vp8_parser.cc
@@ -0,0 +1,872 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP8 raw stream parser,
+// as defined in RFC 6386.
+
+#include "base/logging.h"
+#include "media/filters/vp8_parser.h"
+
+namespace media {
+
+#define ERROR_RETURN(what) \
+ do { \
+ DVLOG(1) << "Error while trying to read " #what; \
+ return false; \
+ } while (0)
+
+#define BD_READ_BOOL_OR_RETURN(out) \
+ do { \
+ if (!bd_.ReadBool(out)) \
+ ERROR_RETURN(out); \
+ } while (0)
+
+#define BD_READ_BOOL_WITH_PROB_OR_RETURN(out, prob) \
+ do { \
+ if (!bd_.ReadBool(out, prob)) \
+ ERROR_RETURN(out); \
+ } while (0)
+
+#define BD_READ_UNSIGNED_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!bd_.ReadLiteral(num_bits, &_out)) \
+ ERROR_RETURN(out); \
+ *out = _out; \
+ } while (0)
+
+#define BD_READ_SIGNED_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!bd_.ReadLiteralWithSign(num_bits, &_out)) \
+ ERROR_RETURN(out); \
+ *out = _out; \
+ } while (0)
+
+Vp8FrameHeader::Vp8FrameHeader() {
+ memset(this, 0, sizeof(*this));
+}
+
+Vp8Parser::Vp8Parser() : stream_(nullptr), bytes_left_(0) {
+}
+
+Vp8Parser::~Vp8Parser() {
+}
+
+bool Vp8Parser::ParseFrame(const uint8_t* ptr,
+ size_t frame_size,
+ Vp8FrameHeader* fhdr) {
+ stream_ = ptr;
+ bytes_left_ = frame_size;
+
+ memset(fhdr, 0, sizeof(*fhdr));
+ fhdr->data = stream_;
+ fhdr->frame_size = bytes_left_;
+
+ if (!ParseFrameTag(fhdr))
+ return false;
+
+ fhdr->first_part_offset = stream_ - fhdr->data;
+
+ if (!ParseFrameHeader(fhdr))
+ return false;
+
+ if (!ParsePartitions(fhdr))
+ return false;
+
+ DVLOG(4) << "Frame parsed, start: " << static_cast<const void*>(ptr)
+ << ", size: " << frame_size
+ << ", offsets: to first_part=" << fhdr->first_part_offset
+ << ", to macroblock data (in bits)=" << fhdr->macroblock_bit_offset;
+
+ return true;
+}
+
+static inline uint32_t GetBitsAt(uint32_t data, size_t shift, size_t num_bits) {
+ return ((data >> shift) & ((1 << num_bits) - 1));
+}
+
+bool Vp8Parser::ParseFrameTag(Vp8FrameHeader* fhdr) {
+ const size_t kFrameTagSize = 3;
+ if (bytes_left_ < kFrameTagSize)
+ return false;
+
+ uint32_t frame_tag = (stream_[2] << 16) | (stream_[1] << 8) | stream_[0];
+ fhdr->key_frame =
+ static_cast<Vp8FrameHeader::FrameType>(GetBitsAt(frame_tag, 0, 1));
+ fhdr->version = GetBitsAt(frame_tag, 1, 2);
+ fhdr->is_experimental = !!GetBitsAt(frame_tag, 3, 1);
+ fhdr->show_frame =!!GetBitsAt(frame_tag, 4, 1);
+ fhdr->first_part_size = GetBitsAt(frame_tag, 5, 19);
+
+ stream_ += kFrameTagSize;
+ bytes_left_ -= kFrameTagSize;
+
+ if (fhdr->IsKeyframe()) {
+ const size_t kKeyframeTagSize = 7;
+ if (bytes_left_ < kKeyframeTagSize)
+ return false;
+
+ static const uint8_t kVp8StartCode[] = {0x9d, 0x01, 0x2a};
+ if (memcmp(stream_, kVp8StartCode, sizeof(kVp8StartCode)) != 0)
+ return false;
+
+ stream_ += sizeof(kVp8StartCode);
+ bytes_left_ -= sizeof(kVp8StartCode);
+
+ uint16_t data = (stream_[1] << 8) | stream_[0];
+ fhdr->width = data & 0x3fff;
+ fhdr->horizontal_scale = data >> 14;
+
+ data = (stream_[3] << 8) | stream_[2];
+ fhdr->height = data & 0x3fff;
+ fhdr->vertical_scale = data >> 14;
+
+ stream_ += 4;
+ bytes_left_ -= 4;
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseFrameHeader(Vp8FrameHeader* fhdr) {
+ if (!bd_.Initialize(stream_, bytes_left_))
+ return false;
+
+ bool keyframe = fhdr->IsKeyframe();
+ if (keyframe) {
+ unsigned int data;
+ BD_READ_UNSIGNED_OR_RETURN(1, &data); // color_space
+ BD_READ_UNSIGNED_OR_RETURN(1, &data); // clamping_type
+ }
+
+ if (!ParseSegmentationHeader(keyframe))
+ return false;
+
+ fhdr->segmentation_hdr = curr_segmentation_hdr_;
+
+ if (!ParseLoopFilterHeader(keyframe))
+ return false;
+
+ fhdr->loopfilter_hdr = curr_loopfilter_hdr_;
+
+ int log2_nbr_of_dct_partitions;
+ BD_READ_UNSIGNED_OR_RETURN(2, &log2_nbr_of_dct_partitions);
+ fhdr->num_of_dct_partitions =
+ static_cast<size_t>(1 << log2_nbr_of_dct_partitions);
+
+ if (!ParseQuantizationHeader(&fhdr->quantization_hdr))
+ return false;
+
+ if (keyframe) {
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_entropy_probs);
+ } else {
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_golden_frame);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_alternate_frame);
+
+ int refresh_mode;
+ if (!fhdr->refresh_golden_frame) {
+ BD_READ_UNSIGNED_OR_RETURN(2, &refresh_mode);
+ fhdr->copy_buffer_to_golden =
+ static_cast<Vp8FrameHeader::GoldenRefreshMode>(refresh_mode);
+ }
+
+ if (!fhdr->refresh_alternate_frame) {
+ BD_READ_UNSIGNED_OR_RETURN(2, &refresh_mode);
+ fhdr->copy_buffer_to_alternate =
+ static_cast<Vp8FrameHeader::AltRefreshMode>(refresh_mode);
+ }
+
+ BD_READ_UNSIGNED_OR_RETURN(1, &fhdr->sign_bias_golden);
+ BD_READ_UNSIGNED_OR_RETURN(1, &fhdr->sign_bias_alternate);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_entropy_probs);
+ BD_READ_BOOL_OR_RETURN(&fhdr->refresh_last);
+ }
+
+ if (keyframe)
+ ResetProbs();
+
+ fhdr->entropy_hdr = curr_entropy_hdr_;
+
+ if (!ParseTokenProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs))
+ return false;
+
+ BD_READ_BOOL_OR_RETURN(&fhdr->mb_no_skip_coeff);
+ if (fhdr->mb_no_skip_coeff)
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_skip_false);
+
+ if (!keyframe) {
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_intra);
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_last);
+ BD_READ_UNSIGNED_OR_RETURN(8, &fhdr->prob_gf);
+ }
+
+ if (!ParseIntraProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs,
+ keyframe))
+ return false;
+
+ if (!keyframe) {
+ if (!ParseMVProbs(&fhdr->entropy_hdr, fhdr->refresh_entropy_probs))
+ return false;
+ }
+
+ fhdr->macroblock_bit_offset = bd_.BitOffset();
+ fhdr->bool_dec_range = bd_.GetRange();
+ fhdr->bool_dec_value = bd_.GetBottom();
+ fhdr->bool_dec_count = 7 - (bd_.BitOffset() + 7) % 8;
+
+ return true;
+}
+
+bool Vp8Parser::ParseSegmentationHeader(bool keyframe) {
+ Vp8SegmentationHeader* shdr = &curr_segmentation_hdr_;
+
+ if (keyframe)
+ memset(shdr, 0, sizeof(*shdr));
+
+ BD_READ_BOOL_OR_RETURN(&shdr->segmentation_enabled);
+ if (!shdr->segmentation_enabled)
+ return true;
+
+ BD_READ_BOOL_OR_RETURN(&shdr->update_mb_segmentation_map);
+ BD_READ_BOOL_OR_RETURN(&shdr->update_segment_feature_data);
+ if (shdr->update_segment_feature_data) {
+ int mode;
+ BD_READ_UNSIGNED_OR_RETURN(1, &mode);
+ shdr->segment_feature_mode =
+ static_cast<Vp8SegmentationHeader::SegmentFeatureMode>(mode);
+
+ for (size_t i = 0; i < kMaxMBSegments; ++i) {
+ bool quantizer_update;
+ BD_READ_BOOL_OR_RETURN(&quantizer_update);
+ if (quantizer_update)
+ BD_READ_SIGNED_OR_RETURN(7, &shdr->quantizer_update_value[i]);
+ }
+
+ for (size_t i = 0; i < kMaxMBSegments; ++i) {
+ bool loop_filter_update;
+ BD_READ_BOOL_OR_RETURN(&loop_filter_update);
+ if (loop_filter_update)
+ BD_READ_SIGNED_OR_RETURN(6, &shdr->lf_update_value[i]);
+ }
+ }
+
+ if (shdr->update_mb_segmentation_map) {
+ for (size_t i = 0; i < kNumMBFeatureTreeProbs; ++i) {
+ bool segment_prob_update;
+ BD_READ_BOOL_OR_RETURN(&segment_prob_update);
+ if (segment_prob_update)
+ BD_READ_UNSIGNED_OR_RETURN(8, &shdr->segment_prob[i]);
+ else
+ shdr->segment_prob[i] = Vp8SegmentationHeader::kDefaultSegmentProb;
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseLoopFilterHeader(bool keyframe) {
+ Vp8LoopFilterHeader* lfhdr = &curr_loopfilter_hdr_;
+
+ if (keyframe)
+ memset(lfhdr, 0, sizeof(*lfhdr));
+
+ int type;
+ BD_READ_UNSIGNED_OR_RETURN(1, &type);
+ lfhdr->type = static_cast<Vp8LoopFilterHeader::Type>(type);
+ BD_READ_UNSIGNED_OR_RETURN(6, &lfhdr->level);
+ BD_READ_UNSIGNED_OR_RETURN(3, &lfhdr->sharpness_level);
+ BD_READ_BOOL_OR_RETURN(&lfhdr->loop_filter_adj_enable);
+
+ if (lfhdr->loop_filter_adj_enable) {
+ BD_READ_BOOL_OR_RETURN(&lfhdr->mode_ref_lf_delta_update);
+ if (lfhdr->mode_ref_lf_delta_update) {
+ for (size_t i = 0; i < kNumBlockContexts; ++i) {
+ bool ref_frame_delta_update_flag;
+ BD_READ_BOOL_OR_RETURN(&ref_frame_delta_update_flag);
+ if (ref_frame_delta_update_flag)
+ BD_READ_SIGNED_OR_RETURN(6, &lfhdr->ref_frame_delta[i]);
+ }
+
+ for (size_t i = 0; i < kNumBlockContexts; ++i) {
+ bool mb_mode_delta_update_flag;
+ BD_READ_BOOL_OR_RETURN(&mb_mode_delta_update_flag);
+ if (mb_mode_delta_update_flag)
+ BD_READ_SIGNED_OR_RETURN(6, &lfhdr->mb_mode_delta[i]);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseQuantizationHeader(Vp8QuantizationHeader* qhdr) {
+ // If any of the delta values is not present, the delta should be zero.
+ memset(qhdr, 0, sizeof(*qhdr));
+
+ BD_READ_UNSIGNED_OR_RETURN(7, &qhdr->y_ac_qi);
+
+ bool delta_present;
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y2_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->y2_ac_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->uv_dc_delta);
+
+ BD_READ_BOOL_OR_RETURN(&delta_present);
+ if (delta_present)
+ BD_READ_SIGNED_OR_RETURN(4, &qhdr->uv_ac_delta);
+
+ return true;
+}
+
+// See spec for details on these values.
+const uint8_t kCoeffUpdateProbs[kNumBlockTypes][kNumCoeffBands]
+ [kNumPrevCoeffContexts][kNumEntropyNodes] = {
+ {
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255},
+ {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255},
+ {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255},
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255},
+ {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255},
+ {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ {
+ {
+ {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255},
+ {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255},
+ {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255},
+ {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+};
+
+const uint8_t kKeyframeYModeProbs[kNumYModeProbs] = {145, 156, 163, 128};
+const uint8_t kKeyframeUVModeProbs[kNumUVModeProbs] = {142, 114, 183};
+
+const uint8_t kDefaultYModeProbs[kNumYModeProbs] = {112, 86, 140, 37};
+const uint8_t kDefaultUVModeProbs[kNumUVModeProbs] = {162, 101, 204};
+
+const uint8_t kDefaultCoeffProbs[kNumBlockTypes][kNumCoeffBands]
+ [kNumPrevCoeffContexts][kNumEntropyNodes] = {
+ {
+ {
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ {253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128},
+ {189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128},
+ {106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128},
+ {181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128},
+ { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128},
+ },
+ {
+ { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128},
+ {184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128},
+ { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128},
+ {170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128},
+ { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128},
+ {207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128},
+ {102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128},
+ {177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128},
+ { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62},
+ {131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1},
+ { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128},
+ },
+ {
+ { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128},
+ {184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128},
+ { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128},
+ },
+ {
+ { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128},
+ { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128},
+ { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128},
+ },
+ {
+ { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128},
+ {109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128},
+ { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128},
+ { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128},
+ { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128},
+ },
+ {
+ { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128},
+ {124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128},
+ { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128},
+ },
+ {
+ { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128},
+ {121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128},
+ { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128},
+ },
+ {
+ { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128},
+ {203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ {137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128},
+ {175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128},
+ { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128},
+ },
+ {
+ { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128},
+ {239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128},
+ {155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128},
+ {201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128},
+ { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128},
+ },
+ {
+ { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128},
+ {223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128},
+ {141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128},
+ {190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128},
+ {149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128},
+ {213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128},
+ { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ {
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128},
+ }
+ },
+ {
+ {
+ {202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255},
+ {126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128},
+ { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128},
+ },
+ {
+ { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128},
+ {166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128},
+ { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128},
+ },
+ {
+ { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128},
+ {124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128},
+ { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128},
+ },
+ {
+ { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128},
+ {149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128},
+ { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128}
+ },
+ {
+ { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128},
+ {123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128},
+ { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128},
+ },
+ {
+ { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128},
+ {168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128},
+ { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128},
+ },
+ {
+ { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128},
+ {141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128},
+ { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128},
+ },
+ {
+ { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ {238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128},
+ },
+ },
+};
+
+const uint8_t kMVUpdateProbs[kNumMVContexts][kNumMVProbs] =
+{
+ {
+ 237, 246, 253, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 250, 250, 252, 254, 254,
+ },
+ {
+ 231, 243, 245, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 251, 251, 254, 254, 254,
+ },
+};
+
+const uint8_t kDefaultMVProbs[kNumMVContexts][kNumMVProbs] = {
+ {
+ 162, 128, 225, 146, 172, 147, 214, 39, 156,
+ 128, 129, 132, 75, 145, 178, 206, 239, 254, 254,
+ },
+ {
+ 164, 128, 204, 170, 119, 235, 140, 230, 228,
+ 128, 130, 130, 74, 148, 180, 203, 236, 254, 254,
+ },
+};
+
+void Vp8Parser::ResetProbs() {
+ static_assert(
+ sizeof(curr_entropy_hdr_.coeff_probs) == sizeof(kDefaultCoeffProbs),
+ "coeff_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.coeff_probs, kDefaultCoeffProbs,
+ sizeof(curr_entropy_hdr_.coeff_probs));
+
+ static_assert(sizeof(curr_entropy_hdr_.mv_probs) == sizeof(kDefaultMVProbs),
+ "mv_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.mv_probs, kDefaultMVProbs,
+ sizeof(curr_entropy_hdr_.mv_probs));
+
+ static_assert(
+ sizeof(curr_entropy_hdr_.y_mode_probs) == sizeof(kDefaultYModeProbs),
+ "y_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.y_mode_probs, kDefaultYModeProbs,
+ sizeof(curr_entropy_hdr_.y_mode_probs));
+
+ static_assert(
+ sizeof(curr_entropy_hdr_.uv_mode_probs) == sizeof(kDefaultUVModeProbs),
+ "uv_probs_arrays_must_be_of_correct_size");
+ memcpy(curr_entropy_hdr_.uv_mode_probs, kDefaultUVModeProbs,
+ sizeof(curr_entropy_hdr_.uv_mode_probs));
+}
+
+bool Vp8Parser::ParseTokenProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs) {
+ for (size_t i = 0; i < kNumBlockTypes; ++i) {
+ for (size_t j = 0; j < kNumCoeffBands; ++j) {
+ for (size_t k = 0; k < kNumPrevCoeffContexts; ++k) {
+ for (size_t l = 0; l < kNumEntropyNodes; ++l) {
+ bool coeff_prob_update_flag;
+ BD_READ_BOOL_WITH_PROB_OR_RETURN(&coeff_prob_update_flag,
+ kCoeffUpdateProbs[i][j][k][l]);
+ if (coeff_prob_update_flag)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->coeff_probs[i][j][k][l]);
+ }
+ }
+ }
+ }
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.coeff_probs, ehdr->coeff_probs,
+ sizeof(curr_entropy_hdr_.coeff_probs));
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseIntraProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs,
+ bool keyframe) {
+ if (keyframe) {
+ static_assert(
+ sizeof(ehdr->y_mode_probs) == sizeof(kKeyframeYModeProbs),
+ "y_probs_arrays_must_be_of_correct_size");
+ memcpy(ehdr->y_mode_probs, kKeyframeYModeProbs,
+ sizeof(ehdr->y_mode_probs));
+
+ static_assert(
+ sizeof(ehdr->uv_mode_probs) == sizeof(kKeyframeUVModeProbs),
+ "uv_probs_arrays_must_be_of_correct_size");
+ memcpy(ehdr->uv_mode_probs, kKeyframeUVModeProbs,
+ sizeof(ehdr->uv_mode_probs));
+ } else {
+ bool intra_16x16_prob_update_flag;
+ BD_READ_BOOL_OR_RETURN(&intra_16x16_prob_update_flag);
+ if (intra_16x16_prob_update_flag) {
+ for (size_t i = 0; i < kNumYModeProbs; ++i)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->y_mode_probs[i]);
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.y_mode_probs, ehdr->y_mode_probs,
+ sizeof(curr_entropy_hdr_.y_mode_probs));
+ }
+ }
+
+ bool intra_chroma_prob_update_flag;
+ BD_READ_BOOL_OR_RETURN(&intra_chroma_prob_update_flag);
+ if (intra_chroma_prob_update_flag) {
+ for (size_t i = 0; i < kNumUVModeProbs; ++i)
+ BD_READ_UNSIGNED_OR_RETURN(8, &ehdr->uv_mode_probs[i]);
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.uv_mode_probs, ehdr->uv_mode_probs,
+ sizeof(curr_entropy_hdr_.uv_mode_probs));
+ }
+ }
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParseMVProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs) {
+ for (size_t mv_ctx = 0; mv_ctx < kNumMVContexts; ++mv_ctx) {
+ for (size_t p = 0; p < kNumMVProbs; ++p) {
+ bool mv_prob_update_flag;
+ BD_READ_BOOL_WITH_PROB_OR_RETURN(&mv_prob_update_flag,
+ kMVUpdateProbs[mv_ctx][p]);
+ if (mv_prob_update_flag) {
+ uint8_t prob;
+ BD_READ_UNSIGNED_OR_RETURN(7, &prob);
+ ehdr->mv_probs[mv_ctx][p] = prob ? (prob << 1) : 1;
+ }
+ }
+ }
+
+ if (update_curr_probs) {
+ memcpy(curr_entropy_hdr_.mv_probs, ehdr->mv_probs,
+ sizeof(curr_entropy_hdr_.mv_probs));
+ }
+
+ return true;
+}
+
+bool Vp8Parser::ParsePartitions(Vp8FrameHeader* fhdr) {
+ CHECK_GE(fhdr->num_of_dct_partitions, 1u);
+ CHECK_LE(fhdr->num_of_dct_partitions, kMaxDCTPartitions);
+
+ // DCT partitions start after the first partition and partition size values
+ // that follow it. There are num_of_dct_partitions - 1 sizes stored in the
+ // stream after the first partition, each 3 bytes long. The size of last
+ // DCT partition is not stored in the stream, but is instead calculated by
+ // taking the remainder of the frame size after the penultimate DCT partition.
+ size_t first_dct_pos = fhdr->first_part_offset + fhdr->first_part_size +
+ (fhdr->num_of_dct_partitions - 1) * 3;
+
+ // Make sure we have enough data for the first partition and partition sizes.
+ if (fhdr->frame_size < first_dct_pos)
+ return false;
+
+ // Total size of all DCT partitions.
+ size_t bytes_left = fhdr->frame_size - first_dct_pos;
+
+ // Position ourselves at the beginning of partition size values.
+ const uint8_t* ptr =
+ fhdr->data + fhdr->first_part_offset + fhdr->first_part_size;
+
+ // Read sizes from the stream (if present).
+ for (size_t i = 0; i < fhdr->num_of_dct_partitions - 1; ++i) {
+ fhdr->dct_partition_sizes[i] = (ptr[2] << 16) | (ptr[1] << 8) | ptr[0];
+
+ // Make sure we have enough data in the stream for ith partition and
+ // subtract its size from total.
+ if (bytes_left < fhdr->dct_partition_sizes[i])
+ return false;
+
+ bytes_left -= fhdr->dct_partition_sizes[i];
+
+ // Move to the position of the next partition size value.
+ ptr += 3;
+ }
+
+ // The remainder of the data belongs to the last DCT partition.
+ fhdr->dct_partition_sizes[fhdr->num_of_dct_partitions - 1] = bytes_left;
+
+ DVLOG(4) << "Control part size: " << fhdr->first_part_size;
+ for (size_t i = 0; i < fhdr->num_of_dct_partitions; ++i)
+ DVLOG(4) << "DCT part " << i << " size: " << fhdr->dct_partition_sizes[i];
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vp8_parser.h b/chromium/media/filters/vp8_parser.h
new file mode 100644
index 00000000000..dc7bc241173
--- /dev/null
+++ b/chromium/media/filters/vp8_parser.h
@@ -0,0 +1,195 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP8 raw stream parser,
+// as defined in RFC 6386.
+
+#ifndef MEDIA_FILTERS_VP8_PARSER_H_
+#define MEDIA_FILTERS_VP8_PARSER_H_
+
+#include "media/base/media_export.h"
+#include "media/filters/vp8_bool_decoder.h"
+
+namespace media {
+
+// See spec for definitions of values/fields.
+const size_t kMaxMBSegments = 4;
+const size_t kNumMBFeatureTreeProbs = 3;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct MEDIA_EXPORT Vp8SegmentationHeader {
+ enum SegmentFeatureMode { FEATURE_MODE_DELTA = 0, FEATURE_MODE_ABSOLUTE = 1 };
+
+ bool segmentation_enabled;
+ bool update_mb_segmentation_map;
+ bool update_segment_feature_data;
+ SegmentFeatureMode segment_feature_mode;
+
+ int8_t quantizer_update_value[kMaxMBSegments];
+ int8_t lf_update_value[kMaxMBSegments];
+ static const int kDefaultSegmentProb = 255;
+ uint8_t segment_prob[kNumMBFeatureTreeProbs];
+};
+
+const size_t kNumBlockContexts = 4;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct MEDIA_EXPORT Vp8LoopFilterHeader {
+ enum Type { LOOP_FILTER_TYPE_NORMAL = 0, LOOP_FILTER_TYPE_SIMPLE = 1 };
+ Type type;
+ uint8_t level;
+ uint8_t sharpness_level;
+ bool loop_filter_adj_enable;
+ bool mode_ref_lf_delta_update;
+
+ int8_t ref_frame_delta[kNumBlockContexts];
+ int8_t mb_mode_delta[kNumBlockContexts];
+};
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct MEDIA_EXPORT Vp8QuantizationHeader {
+ uint8_t y_ac_qi;
+ int8_t y_dc_delta;
+ int8_t y2_dc_delta;
+ int8_t y2_ac_delta;
+ int8_t uv_dc_delta;
+ int8_t uv_ac_delta;
+};
+
+const size_t kNumBlockTypes = 4;
+const size_t kNumCoeffBands = 8;
+const size_t kNumPrevCoeffContexts = 3;
+const size_t kNumEntropyNodes = 11;
+
+const size_t kNumMVContexts = 2;
+const size_t kNumMVProbs = 19;
+
+const size_t kNumYModeProbs = 4;
+const size_t kNumUVModeProbs = 3;
+
+// Member of Vp8FrameHeader and will be 0-initialized
+// in Vp8FrameHeader's constructor.
+struct Vp8EntropyHeader {
+ uint8_t coeff_probs[kNumBlockTypes][kNumCoeffBands][kNumPrevCoeffContexts]
+ [kNumEntropyNodes];
+
+ uint8_t y_mode_probs[kNumYModeProbs];
+ uint8_t uv_mode_probs[kNumUVModeProbs];
+
+ uint8_t mv_probs[kNumMVContexts][kNumMVProbs];
+};
+
+const size_t kMaxDCTPartitions = 8;
+
+struct MEDIA_EXPORT Vp8FrameHeader {
+ Vp8FrameHeader();
+
+ enum FrameType { KEYFRAME = 0, INTERFRAME = 1 };
+ bool IsKeyframe() const { return key_frame == KEYFRAME; }
+
+ enum GoldenRefreshMode {
+ COPY_LAST_TO_GOLDEN = 1,
+ COPY_ALT_TO_GOLDEN = 2,
+ };
+
+ enum AltRefreshMode {
+ COPY_LAST_TO_ALT = 1,
+ COPY_GOLDEN_TO_ALT = 2,
+ };
+
+ FrameType key_frame;
+ uint8_t version;
+ bool is_experimental;
+ bool show_frame;
+ size_t first_part_size;
+
+ uint16_t width;
+ uint8_t horizontal_scale;
+ uint16_t height;
+ uint8_t vertical_scale;
+
+ Vp8SegmentationHeader segmentation_hdr;
+ Vp8LoopFilterHeader loopfilter_hdr;
+ Vp8QuantizationHeader quantization_hdr;
+
+ size_t num_of_dct_partitions;
+
+ Vp8EntropyHeader entropy_hdr;
+
+ bool refresh_entropy_probs;
+ bool refresh_golden_frame;
+ bool refresh_alternate_frame;
+ GoldenRefreshMode copy_buffer_to_golden;
+ AltRefreshMode copy_buffer_to_alternate;
+ uint8_t sign_bias_golden;
+ uint8_t sign_bias_alternate;
+ bool refresh_last;
+
+ bool mb_no_skip_coeff;
+ uint8_t prob_skip_false;
+ uint8_t prob_intra;
+ uint8_t prob_last;
+ uint8_t prob_gf;
+
+ const uint8_t* data;
+ size_t frame_size;
+
+ size_t dct_partition_sizes[kMaxDCTPartitions];
+ // Offset in bytes from data.
+ off_t first_part_offset;
+ // Offset in bits from first_part_offset.
+ off_t macroblock_bit_offset;
+
+ // Bool decoder state
+ uint8_t bool_dec_range;
+ uint8_t bool_dec_value;
+ uint8_t bool_dec_count;
+};
+
+// A parser for raw VP8 streams as specified in RFC 6386.
+class MEDIA_EXPORT Vp8Parser {
+ public:
+ Vp8Parser();
+ ~Vp8Parser();
+
+ // Try to parse exactly one VP8 frame starting at |ptr| and of size |size|,
+ // filling the parsed data in |fhdr|. Return true on success.
+ // Size has to be exactly the size of the frame and coming from the caller,
+ // who needs to acquire it from elsewhere (normally from a container).
+ bool ParseFrame(const uint8_t* ptr, size_t size, Vp8FrameHeader* fhdr);
+
+ private:
+ bool ParseFrameTag(Vp8FrameHeader* fhdr);
+ bool ParseFrameHeader(Vp8FrameHeader* fhdr);
+
+ bool ParseSegmentationHeader(bool keyframe);
+ bool ParseLoopFilterHeader(bool keyframe);
+ bool ParseQuantizationHeader(Vp8QuantizationHeader* qhdr);
+ bool ParseTokenProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs);
+ bool ParseIntraProbs(Vp8EntropyHeader* ehdr,
+ bool update_curr_probs,
+ bool keyframe);
+ bool ParseMVProbs(Vp8EntropyHeader* ehdr, bool update_curr_probs);
+ bool ParsePartitions(Vp8FrameHeader* fhdr);
+ void ResetProbs();
+
+ // These persist across calls to ParseFrame() and may be used and/or updated
+ // for subsequent frames if the stream instructs us to do so.
+ Vp8SegmentationHeader curr_segmentation_hdr_;
+ Vp8LoopFilterHeader curr_loopfilter_hdr_;
+ Vp8EntropyHeader curr_entropy_hdr_;
+
+ const uint8_t* stream_;
+ size_t bytes_left_;
+ Vp8BoolDecoder bd_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8Parser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VP8_PARSER_H_
diff --git a/chromium/media/filters/vp8_parser_unittest.cc b/chromium/media/filters/vp8_parser_unittest.cc
new file mode 100644
index 00000000000..39a2a801032
--- /dev/null
+++ b/chromium/media/filters/vp8_parser_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_byteorder.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/vp8_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(Vp8ParserTest, StreamFileParsing) {
+ base::FilePath file_path = GetTestDataFilePath("test-25fps.vp8");
+ // Number of frames in the test stream to be parsed.
+ const int num_frames = 250;
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ Vp8Parser parser;
+
+ // Parse until the end of stream/unsupported stream/error in stream is found.
+ int num_parsed_frames = 0;
+ const uint8_t* stream_ptr = stream.data();
+ size_t bytes_left = stream.length();
+ // Skip IVF file header.
+ const size_t kIvfStreamHeaderLen = 32;
+ CHECK_GE(bytes_left, kIvfStreamHeaderLen);
+ stream_ptr += kIvfStreamHeaderLen;
+ bytes_left -= kIvfStreamHeaderLen;
+
+ const size_t kIvfFrameHeaderLen = 12;
+ while (bytes_left > kIvfFrameHeaderLen) {
+ Vp8FrameHeader fhdr;
+ uint32_t frame_size =
+ base::ByteSwapToLE32(*reinterpret_cast<const uint32_t*>(stream_ptr));
+ // Skip IVF frame header.
+ stream_ptr += kIvfFrameHeaderLen;
+ bytes_left -= kIvfFrameHeaderLen;
+
+ ASSERT_TRUE(parser.ParseFrame(stream_ptr, frame_size, &fhdr));
+
+ stream_ptr += frame_size;
+ bytes_left -= frame_size;
+ ++num_parsed_frames;
+ }
+
+ DVLOG(1) << "Number of successfully parsed frames before EOS: "
+ << num_parsed_frames;
+
+ EXPECT_EQ(num_frames, num_parsed_frames);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index bf7cc3cdaac..64380b407a5 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -17,14 +17,13 @@
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_byteorder.h"
+#include "base/sys_info.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/demuxer_stream.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_frame.h"
#include "media/base/video_util.h"
// Include libvpx header files.
@@ -50,7 +49,7 @@ static int GetThreadCount(const VideoDecoderConfig& config) {
// Refer to http://crbug.com/93932 for tsan suppressions on decoding.
int decode_threads = kDecodeThreads;
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
if (threads.empty() || !base::StringToInt(threads, &decode_threads)) {
if (config.codec() == kCodecVP9) {
@@ -63,6 +62,8 @@ static int GetThreadCount(const VideoDecoderConfig& config) {
decode_threads = 4;
}
+ decode_threads = std::min(decode_threads,
+ base::SysInfo::NumberOfProcessors());
return decode_threads;
}
@@ -353,10 +354,12 @@ void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
return;
}
- base::ResetAndReturn(&decode_cb_).Run(kOk);
-
if (video_frame.get())
output_cb_.Run(video_frame);
+
+ // VideoDecoderShim expects that |decode_cb| is called only after
+ // |output_cb_|.
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
}
bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
@@ -367,14 +370,18 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
// Pass |buffer| to libvpx.
int64 timestamp = buffer->timestamp().InMicroseconds();
void* user_priv = reinterpret_cast<void*>(&timestamp);
- vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
- buffer->data(),
- buffer->data_size(),
- user_priv,
- 0);
- if (status != VPX_CODEC_OK) {
- LOG(ERROR) << "vpx_codec_decode() failed, status=" << status;
- return false;
+
+ {
+ TRACE_EVENT1("video", "vpx_codec_decode", "timestamp", timestamp);
+ vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
+ buffer->data(),
+ buffer->data_size(),
+ user_priv,
+ 0);
+ if (status != VPX_CODEC_OK) {
+ LOG(ERROR) << "vpx_codec_decode() failed, status=" << status;
+ return false;
+ }
}
// Gets pointer to decoded data.
@@ -400,15 +407,18 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
const uint64 side_data_id = base::NetToHost64(
*(reinterpret_cast<const uint64*>(buffer->side_data())));
if (side_data_id == 1) {
- status = vpx_codec_decode(vpx_codec_alpha_,
- buffer->side_data() + 8,
- buffer->side_data_size() - 8,
- user_priv_alpha,
- 0);
-
- if (status != VPX_CODEC_OK) {
- LOG(ERROR) << "vpx_codec_decode() failed on alpha, status=" << status;
- return false;
+ {
+ TRACE_EVENT1("video", "vpx_codec_decode_alpha",
+ "timestamp_alpha", timestamp_alpha);
+ vpx_codec_err_t status = vpx_codec_decode(vpx_codec_alpha_,
+ buffer->side_data() + 8,
+ buffer->side_data_size() - 8,
+ user_priv_alpha,
+ 0);
+ if (status != VPX_CODEC_OK) {
+ LOG(ERROR) << "vpx_codec_decode() failed on alpha, status=" << status;
+ return false;
+ }
}
// Gets pointer to decoded data.
@@ -424,6 +434,13 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
LOG(ERROR) << "Invalid output timestamp on alpha.";
return false;
}
+
+ if (vpx_image_alpha->d_h != vpx_image->d_h ||
+ vpx_image_alpha->d_w != vpx_image->d_w) {
+ LOG(ERROR) << "The alpha plane dimensions are not the same as the "
+ "image dimensions.";
+ return false;
+ }
}
}
@@ -448,15 +465,25 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
codec_format = VideoFrame::YV24;
uv_rows = vpx_image->d_h;
} else if (vpx_codec_alpha_) {
+ // TODO(watk): A limitation of conflating color space with pixel format is
+ // that it's not possible to have BT709 with alpha.
+ // Until color space is separated from format, prefer YV12A over YV12HD.
codec_format = VideoFrame::YV12A;
+ } else if (vpx_image->cs == VPX_CS_BT_709) {
+ codec_format = VideoFrame::YV12HD;
}
- gfx::Size size(vpx_image->d_w, vpx_image->d_h);
+ // The mixed |w|/|d_h| in |coded_size| is intentional. Setting the correct
+ // coded width is necessary to allow coalesced memory access, which may avoid
+ // frame copies. Setting the correct coded height however does not have any
+ // benefit, and only risk copying too much data.
+ const gfx::Size coded_size(vpx_image->w, vpx_image->d_h);
+ const gfx::Size visible_size(vpx_image->d_w, vpx_image->d_h);
if (!vpx_codec_alpha_ && memory_pool_.get()) {
*video_frame = VideoFrame::WrapExternalYuvData(
codec_format,
- size, gfx::Rect(size), config_.natural_size(),
+ coded_size, gfx::Rect(visible_size), config_.natural_size(),
vpx_image->stride[VPX_PLANE_Y],
vpx_image->stride[VPX_PLANE_U],
vpx_image->stride[VPX_PLANE_V],
@@ -470,8 +497,8 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
*video_frame = frame_pool_.CreateFrame(
codec_format,
- size,
- gfx::Rect(size),
+ visible_size,
+ gfx::Rect(visible_size),
config_.natural_size(),
kNoTimestamp());
@@ -495,8 +522,8 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
return;
}
CopyAPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
- vpx_image->stride[VPX_PLANE_Y],
- vpx_image->d_h,
+ vpx_image_alpha->stride[VPX_PLANE_Y],
+ vpx_image_alpha->d_h,
video_frame->get());
}
diff --git a/chromium/media/formats/common/offset_byte_queue.cc b/chromium/media/formats/common/offset_byte_queue.cc
index a2b6994e40c..0bbff80c7cf 100644
--- a/chromium/media/formats/common/offset_byte_queue.cc
+++ b/chromium/media/formats/common/offset_byte_queue.cc
@@ -4,7 +4,6 @@
#include "media/formats/common/offset_byte_queue.h"
-#include "base/basictypes.h"
#include "base/logging.h"
namespace media {
diff --git a/chromium/media/formats/common/stream_parser_test_base.cc b/chromium/media/formats/common/stream_parser_test_base.cc
index 1d5f4b9c0c2..f47a87da502 100644
--- a/chromium/media/formats/common/stream_parser_test_base.cc
+++ b/chromium/media/formats/common/stream_parser_test_base.cc
@@ -19,7 +19,7 @@ static std::string BufferQueueToString(
itr != buffers.end();
++itr) {
ss << " " << (*itr)->timestamp().InMilliseconds();
- if ((*itr)->IsKeyframe())
+ if ((*itr)->is_key_frame())
ss << "K";
}
ss << " }";
@@ -73,11 +73,9 @@ bool StreamParserTestBase::AppendDataInPieces(const uint8* data,
}
void StreamParserTestBase::OnInitDone(
- bool success,
const StreamParser::InitParameters& params) {
EXPECT_TRUE(params.auto_update_timestamp_offset);
- DVLOG(1) << __FUNCTION__ << "(" << success << ", "
- << params.duration.InMilliseconds() << ", "
+ DVLOG(1) << __FUNCTION__ << "(" << params.duration.InMilliseconds() << ", "
<< params.auto_update_timestamp_offset << ")";
}
@@ -110,9 +108,10 @@ bool StreamParserTestBase::OnNewBuffers(
return true;
}
-void StreamParserTestBase::OnKeyNeeded(const std::string& type,
+void StreamParserTestBase::OnKeyNeeded(EmeInitDataType type,
const std::vector<uint8>& init_data) {
- DVLOG(1) << __FUNCTION__ << "(" << type << ", " << init_data.size() << ")";
+ DVLOG(1) << __FUNCTION__ << "(" << static_cast<int>(type) << ", "
+ << init_data.size() << ")";
}
void StreamParserTestBase::OnNewSegment() {
diff --git a/chromium/media/formats/common/stream_parser_test_base.h b/chromium/media/formats/common/stream_parser_test_base.h
index eb31562dd7f..ea61dd3a9a2 100644
--- a/chromium/media/formats/common/stream_parser_test_base.h
+++ b/chromium/media/formats/common/stream_parser_test_base.h
@@ -50,15 +50,14 @@ class StreamParserTestBase {
private:
bool AppendDataInPieces(const uint8* data, size_t length, size_t piece_size);
- void OnInitDone(bool success, const StreamParser::InitParameters& params);
+ void OnInitDone(const StreamParser::InitParameters& params);
bool OnNewConfig(const AudioDecoderConfig& audio_config,
const VideoDecoderConfig& video_config,
const StreamParser::TextTrackConfigMap& text_config);
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
const StreamParser::BufferQueue& video_buffers,
const StreamParser::TextBufferQueueMap& text_map);
- void OnKeyNeeded(const std::string& type,
- const std::vector<uint8>& init_data);
+ void OnKeyNeeded(EmeInitDataType type, const std::vector<uint8>& init_data);
void OnNewSegment();
void OnEndOfSegment();
diff --git a/chromium/media/formats/mp2t/es_adapter_video.cc b/chromium/media/formats/mp2t/es_adapter_video.cc
index 70b16e350f0..0208d44fb70 100644
--- a/chromium/media/formats/mp2t/es_adapter_video.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video.cc
@@ -5,7 +5,6 @@
#include "media/formats/mp2t/es_adapter_video.h"
#include "media/base/buffers.h"
-#include "media/base/stream_parser_buffer.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/mp2t_common.h"
@@ -112,7 +111,7 @@ bool EsAdapterVideo::OnNewBuffer(
// - if it is not associated with any config,
// - or if no valid key frame has been found so far.
if (!has_valid_config_ ||
- (!has_valid_frame_ && !stream_parser_buffer->IsKeyframe())) {
+ (!has_valid_frame_ && !stream_parser_buffer->is_key_frame())) {
discarded_frame_count_++;
return true;
}
@@ -196,7 +195,7 @@ base::TimeDelta EsAdapterVideo::GetNextFramePts(base::TimeDelta current_pts) {
void EsAdapterVideo::ReplaceDiscardedFrames(
const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
DCHECK_GT(discarded_frame_count_, 0);
- DCHECK(stream_parser_buffer->IsKeyframe());
+ DCHECK(stream_parser_buffer->is_key_frame());
// PTS/DTS are interpolated between the min PTS/DTS of discarded frames
// and the PTS/DTS of the first valid buffer.
@@ -219,7 +218,7 @@ void EsAdapterVideo::ReplaceDiscardedFrames(
StreamParserBuffer::CopyFrom(
stream_parser_buffer->data(),
stream_parser_buffer->data_size(),
- stream_parser_buffer->IsKeyframe(),
+ stream_parser_buffer->is_key_frame(),
stream_parser_buffer->type(),
stream_parser_buffer->track_id());
frame->SetDecodeTimestamp(dts);
diff --git a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
index 906d8e175aa..b601f7ece1f 100644
--- a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
@@ -60,7 +60,6 @@ GenerateFakeBuffers(const int* frame_pts_ms,
class EsAdapterVideoTest : public testing::Test {
public:
EsAdapterVideoTest();
- virtual ~EsAdapterVideoTest() {}
protected:
// Feed the ES adapter with the buffers from |buffer_queue|.
@@ -92,7 +91,7 @@ void EsAdapterVideoTest::OnNewConfig(const VideoDecoderConfig& video_config) {
void EsAdapterVideoTest::OnNewBuffer(
scoped_refptr<StreamParserBuffer> buffer) {
buffer_descriptors_ << "(" << buffer->duration().InMilliseconds() << ","
- << (buffer->IsKeyframe() ? "Y" : "N") << ") ";
+ << (buffer->is_key_frame() ? "Y" : "N") << ") ";
}
std::string EsAdapterVideoTest::RunAdapterTest(
diff --git a/chromium/media/formats/mp2t/es_parser_adts.cc b/chromium/media/formats/mp2t/es_parser_adts.cc
index 6bae0c78a90..d646d0e1d3d 100644
--- a/chromium/media/formats/mp2t/es_parser_adts.cc
+++ b/chromium/media/formats/mp2t/es_parser_adts.cc
@@ -4,7 +4,6 @@
#include "media/formats/mp2t/es_parser_adts.h"
-#include <list>
#include "base/basictypes.h"
#include "base/logging.h"
diff --git a/chromium/media/formats/mp2t/es_parser_adts.h b/chromium/media/formats/mp2t/es_parser_adts.h
index 39998ecf24f..740e3ba66c2 100644
--- a/chromium/media/formats/mp2t/es_parser_adts.h
+++ b/chromium/media/formats/mp2t/es_parser_adts.h
@@ -33,17 +33,17 @@ class MEDIA_EXPORT EsParserAdts : public EsParser {
EsParserAdts(const NewAudioConfigCB& new_audio_config_cb,
const EmitBufferCB& emit_buffer_cb,
bool sbr_in_mimetype);
- virtual ~EsParserAdts();
+ ~EsParserAdts() override;
// EsParser implementation.
- virtual void Flush() override;
+ void Flush() override;
private:
struct AdtsFrame;
// EsParser implementation.
- virtual bool ParseFromEsQueue() override;
- virtual void ResetInternal() override;
+ bool ParseFromEsQueue() override;
+ void ResetInternal() override;
// Synchronize the stream on an ADTS syncword (consuming bytes from
// |es_queue_| if needed).
diff --git a/chromium/media/formats/mp2t/es_parser_adts_unittest.cc b/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
index d1952f30a88..966553803f6 100644
--- a/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
@@ -22,7 +22,6 @@ class EsParserAdtsTest : public EsParserTestBase,
public testing::Test {
public:
EsParserAdtsTest();
- virtual ~EsParserAdtsTest() {}
protected:
bool Process(const std::vector<Packet>& pes_packets, bool force_timing);
diff --git a/chromium/media/formats/mp2t/es_parser_h264.cc b/chromium/media/formats/mp2t/es_parser_h264.cc
index dc85cdc54b9..830da34fc54 100644
--- a/chromium/media/formats/mp2t/es_parser_h264.cc
+++ b/chromium/media/formats/mp2t/es_parser_h264.cc
@@ -4,7 +4,6 @@
#include "media/formats/mp2t/es_parser_h264.h"
-#include "base/basictypes.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "media/base/buffers.h"
@@ -12,10 +11,9 @@
#include "media/base/video_frame.h"
#include "media/filters/h264_parser.h"
#include "media/formats/common/offset_byte_queue.h"
-#include "media/formats/mp2t/es_adapter_video.h"
#include "media/formats/mp2t/mp2t_common.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/formats/mp2t/es_parser_h264.h b/chromium/media/formats/mp2t/es_parser_h264.h
index a6a67b57224..b37cfc117c2 100644
--- a/chromium/media/formats/mp2t/es_parser_h264.h
+++ b/chromium/media/formats/mp2t/es_parser_h264.h
@@ -42,15 +42,15 @@ class MEDIA_EXPORT EsParserH264 : public EsParser {
EsParserH264(const NewVideoConfigCB& new_video_config_cb,
const EmitBufferCB& emit_buffer_cb);
- virtual ~EsParserH264();
+ ~EsParserH264() override;
// EsParser implementation.
- virtual void Flush() override;
+ void Flush() override;
private:
// EsParser implementation.
- virtual bool ParseFromEsQueue() override;
- virtual void ResetInternal() override;
+ bool ParseFromEsQueue() override;
+ void ResetInternal() override;
// Find the AUD located at or after |*stream_pos|.
// Return true if an AUD is found.
diff --git a/chromium/media/formats/mp2t/es_parser_h264_unittest.cc b/chromium/media/formats/mp2t/es_parser_h264_unittest.cc
index 5228b6e8a1e..65f9a37be96 100644
--- a/chromium/media/formats/mp2t/es_parser_h264_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_h264_unittest.cc
@@ -25,7 +25,6 @@ class EsParserH264Test : public EsParserTestBase,
public testing::Test {
public:
EsParserH264Test() {}
- virtual ~EsParserH264Test() {}
protected:
void LoadH264Stream(const char* filename);
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc b/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
index c092ccbddb0..51176140e3a 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
@@ -4,7 +4,6 @@
#include "media/formats/mp2t/es_parser_mpeg1audio.h"
-#include <list>
#include "base/basictypes.h"
#include "base/bind.h"
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio.h b/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
index f754d3b5d29..c10f8fdfffa 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
@@ -34,10 +34,10 @@ class MEDIA_EXPORT EsParserMpeg1Audio : public EsParser {
EsParserMpeg1Audio(const NewAudioConfigCB& new_audio_config_cb,
const EmitBufferCB& emit_buffer_cb,
const LogCB& log_cb);
- virtual ~EsParserMpeg1Audio();
+ ~EsParserMpeg1Audio() override;
// EsParser implementation.
- virtual void Flush() override;
+ void Flush() override;
private:
// Used to link a PTS with a byte position in the ES stream.
@@ -47,8 +47,8 @@ class MEDIA_EXPORT EsParserMpeg1Audio : public EsParser {
struct Mpeg1AudioFrame;
// EsParser implementation.
- virtual bool ParseFromEsQueue() override;
- virtual void ResetInternal() override;
+ bool ParseFromEsQueue() override;
+ void ResetInternal() override;
// Synchronize the stream on a Mpeg1 audio syncword (consuming bytes from
// |es_queue_| if needed).
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
index 0571f058173..e875c03fe98 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
@@ -23,7 +23,6 @@ class EsParserMpeg1AudioTest : public EsParserTestBase,
public testing::Test {
public:
EsParserMpeg1AudioTest();
- virtual ~EsParserMpeg1AudioTest() {}
protected:
bool Process(const std::vector<Packet>& pes_packets, bool force_timing);
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser.cc b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
index a386f33c26f..4d2e2d4c1aa 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
@@ -6,13 +6,10 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/memory/scoped_ptr.h"
#include "base/stl_util.h"
-#include "media/base/audio_decoder_config.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
-#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/es_parser_adts.h"
#include "media/formats/mp2t/es_parser_h264.h"
@@ -169,8 +166,8 @@ void Mp2tStreamParser::Init(
const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- bool /* ignore_text_tracks */ ,
- const NeedKeyCB& need_key_cb,
+ bool /* ignore_text_tracks */,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
@@ -179,13 +176,13 @@ void Mp2tStreamParser::Init(
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
- DCHECK(!need_key_cb.is_null());
+ DCHECK(!encrypted_media_init_data_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
init_cb_ = init_cb;
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
- need_key_cb_ = need_key_cb;
+ encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
@@ -512,8 +509,7 @@ bool Mp2tStreamParser::FinishInitializationIfNeeded() {
// For Mpeg2 TS, the duration is not known.
DVLOG(1) << "Mpeg2TS stream parser initialization done";
- base::ResetAndReturn(&init_cb_)
- .Run(true, InitParameters(kInfiniteDuration()));
+ base::ResetAndReturn(&init_cb_).Run(InitParameters(kInfiniteDuration()));
is_initialized_ = true;
return true;
@@ -559,8 +555,8 @@ void Mp2tStreamParser::OnEmitVideoBuffer(
<< stream_parser_buffer->timestamp().InMilliseconds()
<< " dur="
<< stream_parser_buffer->duration().InMilliseconds()
- << " IsKeyframe="
- << stream_parser_buffer->IsKeyframe();
+ << " is_key_frame="
+ << stream_parser_buffer->is_key_frame();
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser.h b/chromium/media/formats/mp2t/mp2t_stream_parser.h
index 3efe2a78c78..ea91d841834 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser.h
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.h
@@ -28,19 +28,20 @@ class PidState;
class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
public:
explicit Mp2tStreamParser(bool sbr_in_mimetype);
- virtual ~Mp2tStreamParser();
+ ~Mp2tStreamParser() override;
// StreamParser implementation.
- virtual void Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
- virtual void Flush() override;
- virtual bool Parse(const uint8* buf, int size) override;
+ void Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) override;
+ void Flush() override;
+ bool Parse(const uint8* buf, int size) override;
private:
typedef std::map<int, PidState*> PidMap;
@@ -97,7 +98,7 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
- NeedKeyCB need_key_cb_;
+ EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
LogCB log_cb_;
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index bca3831b9b9..263f117358e 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -101,10 +101,8 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
- void OnInit(bool init_ok,
- const StreamParser::InitParameters& params) {
- DVLOG(1) << "OnInit: ok=" << init_ok
- << ", dur=" << params.duration.InMilliseconds()
+ void OnInit(const StreamParser::InitParameters& params) {
+ DVLOG(1) << "OnInit: dur=" << params.duration.InMilliseconds()
<< ", autoTimestampOffset=" << params.auto_update_timestamp_offset;
}
@@ -173,8 +171,7 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
- void OnKeyNeeded(const std::string& type,
- const std::vector<uint8>& init_data) {
+ void OnKeyNeeded(EmeInitDataType type, const std::vector<uint8>& init_data) {
NOTREACHED() << "OnKeyNeeded not expected in the Mpeg2 TS parser";
}
diff --git a/chromium/media/formats/mp2t/ts_section_pat.h b/chromium/media/formats/mp2t/ts_section_pat.h
index 32e934eb01d..f550a7f727c 100644
--- a/chromium/media/formats/mp2t/ts_section_pat.h
+++ b/chromium/media/formats/mp2t/ts_section_pat.h
@@ -18,11 +18,11 @@ class TsSectionPat : public TsSectionPsi {
typedef base::Callback<void(int, int)> RegisterPmtCb;
explicit TsSectionPat(const RegisterPmtCb& register_pmt_cb);
- virtual ~TsSectionPat();
+ ~TsSectionPat() override;
// TsSectionPsi implementation.
- virtual bool ParsePsiSection(BitReader* bit_reader) override;
- virtual void ResetPsiSection() override;
+ bool ParsePsiSection(BitReader* bit_reader) override;
+ void ResetPsiSection() override;
private:
RegisterPmtCb register_pmt_cb_;
diff --git a/chromium/media/formats/mp2t/ts_section_pes.h b/chromium/media/formats/mp2t/ts_section_pes.h
index 23091462a42..3be879c3a19 100644
--- a/chromium/media/formats/mp2t/ts_section_pes.h
+++ b/chromium/media/formats/mp2t/ts_section_pes.h
@@ -21,13 +21,13 @@ class TsSectionPes : public TsSection {
public:
TsSectionPes(scoped_ptr<EsParser> es_parser,
TimestampUnroller* timestamp_unroller);
- virtual ~TsSectionPes();
+ ~TsSectionPes() override;
// TsSection implementation.
- virtual bool Parse(bool payload_unit_start_indicator,
+ bool Parse(bool payload_unit_start_indicator,
const uint8* buf, int size) override;
- virtual void Flush() override;
- virtual void Reset() override;
+ void Flush() override;
+ void Reset() override;
private:
// Emit a reassembled PES packet.
diff --git a/chromium/media/formats/mp2t/ts_section_pmt.h b/chromium/media/formats/mp2t/ts_section_pmt.h
index 1d17c068556..6c82d78ff65 100644
--- a/chromium/media/formats/mp2t/ts_section_pmt.h
+++ b/chromium/media/formats/mp2t/ts_section_pmt.h
@@ -21,11 +21,11 @@ class TsSectionPmt : public TsSectionPsi {
typedef base::Callback<void(int, int)> RegisterPesCb;
explicit TsSectionPmt(const RegisterPesCb& register_pes_cb);
- virtual ~TsSectionPmt();
+ ~TsSectionPmt() override;
// Mpeg2TsPsiParser implementation.
- virtual bool ParsePsiSection(BitReader* bit_reader) override;
- virtual void ResetPsiSection() override;
+ bool ParsePsiSection(BitReader* bit_reader) override;
+ void ResetPsiSection() override;
private:
RegisterPesCb register_pes_cb_;
diff --git a/chromium/media/formats/mp2t/ts_section_psi.h b/chromium/media/formats/mp2t/ts_section_psi.h
index 8db91bbb01a..063310c081d 100644
--- a/chromium/media/formats/mp2t/ts_section_psi.h
+++ b/chromium/media/formats/mp2t/ts_section_psi.h
@@ -18,13 +18,13 @@ namespace mp2t {
class TsSectionPsi : public TsSection {
public:
TsSectionPsi();
- virtual ~TsSectionPsi();
+ ~TsSectionPsi() override;
// TsSection implementation.
- virtual bool Parse(bool payload_unit_start_indicator,
+ bool Parse(bool payload_unit_start_indicator,
const uint8* buf, int size) override;
- virtual void Flush() override;
- virtual void Reset() override;
+ void Flush() override;
+ void Reset() override;
// Parse the content of the PSI section.
virtual bool ParsePsiSection(BitReader* bit_reader) = 0;
diff --git a/chromium/media/formats/mp4/aac.cc b/chromium/media/formats/mp4/aac.cc
index 71dededf552..163f24bad23 100644
--- a/chromium/media/formats/mp4/aac.cc
+++ b/chromium/media/formats/mp4/aac.cc
@@ -8,7 +8,6 @@
#include "base/logging.h"
#include "media/base/bit_reader.h"
-#include "media/base/media_log.h"
#include "media/formats/mp4/rcheck.h"
#include "media/formats/mpeg/adts_constants.h"
@@ -58,8 +57,8 @@ bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
RCHECK(reader.ReadBits(5, &profile_));
}
- MEDIA_LOG(log_cb) << "Audio codec: mp4a.40."
- << std::hex << static_cast<int>(profile_);
+ MEDIA_LOG(INFO, log_cb) << "Audio codec: mp4a.40." << std::hex
+ << static_cast<int>(profile_);
RCHECK(SkipDecoderGASpecificConfig(&reader));
RCHECK(SkipErrorSpecificConfig());
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
index 33fce1d8062..0368d1ba5de 100644
--- a/chromium/media/formats/mp4/avc.cc
+++ b/chromium/media/formats/mp4/avc.cc
@@ -5,7 +5,6 @@
#include "media/formats/mp4/avc.h"
#include <algorithm>
-#include <vector>
#include "base/logging.h"
#include "media/base/decrypt_config.h"
diff --git a/chromium/media/formats/mp4/box_definitions.cc b/chromium/media/formats/mp4/box_definitions.cc
index 9ee43064b0b..5a15240ebbc 100644
--- a/chromium/media/formats/mp4/box_definitions.cc
+++ b/chromium/media/formats/mp4/box_definitions.cc
@@ -27,11 +27,8 @@ FourCC ProtectionSystemSpecificHeader::BoxType() const { return FOURCC_PSSH; }
bool ProtectionSystemSpecificHeader::Parse(BoxReader* reader) {
// Validate the box's contents and hang on to the system ID.
- uint32 size;
RCHECK(reader->ReadFullBoxHeader() &&
- reader->ReadVec(&system_id, 16) &&
- reader->Read4(&size) &&
- reader->HasBytes(size));
+ reader->ReadVec(&system_id, 16));
// Copy the entire box, including the header, for passing to EME as initData.
DCHECK(raw_box.empty());
@@ -396,10 +393,10 @@ bool AVCDecoderConfigurationRecord::ParseInternal(BufferReader* reader,
RCHECK(sps_list[i].size() > 4);
if (!log_cb.is_null()) {
- MEDIA_LOG(log_cb) << "Video codec: avc1." << std::hex
- << static_cast<int>(sps_list[i][1])
- << static_cast<int>(sps_list[i][2])
- << static_cast<int>(sps_list[i][3]);
+ MEDIA_LOG(INFO, log_cb) << "Video codec: avc1." << std::hex
+ << static_cast<int>(sps_list[i][1])
+ << static_cast<int>(sps_list[i][2])
+ << static_cast<int>(sps_list[i][3]);
}
}
@@ -492,8 +489,8 @@ bool ElementaryStreamDescriptor::Parse(BoxReader* reader) {
object_type = es_desc.object_type();
if (object_type != 0x40) {
- MEDIA_LOG(reader->log_cb()) << "Audio codec: mp4a."
- << std::hex << static_cast<int>(object_type);
+ MEDIA_LOG(INFO, reader->log_cb()) << "Audio codec: mp4a." << std::hex
+ << static_cast<int>(object_type);
}
if (es_desc.IsAAC(object_type))
@@ -660,12 +657,15 @@ Movie::~Movie() {}
FourCC Movie::BoxType() const { return FOURCC_MOOV; }
bool Movie::Parse(BoxReader* reader) {
- return reader->ScanChildren() &&
- reader->ReadChild(&header) &&
- reader->ReadChildren(&tracks) &&
- // Media Source specific: 'mvex' required
- reader->ReadChild(&extends) &&
- reader->MaybeReadChildren(&pssh);
+ RCHECK(reader->ScanChildren() && reader->ReadChild(&header) &&
+ reader->ReadChildren(&tracks));
+
+ RCHECK_MEDIA_LOGGED(reader->ReadChild(&extends), reader->log_cb(),
+ "Detected unfragmented MP4. Media Source Extensions "
+ "require ISO BMFF moov to contain mvex to indicate that "
+ "Movie Fragments are to be expected.");
+
+ return reader->MaybeReadChildren(&pssh);
}
TrackFragmentDecodeTime::TrackFragmentDecodeTime() : decode_time(0) {}
diff --git a/chromium/media/formats/mp4/box_definitions.h b/chromium/media/formats/mp4/box_definitions.h
index 89522a18656..026effe6b5f 100644
--- a/chromium/media/formats/mp4/box_definitions.h
+++ b/chromium/media/formats/mp4/box_definitions.h
@@ -11,6 +11,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/base/media_export.h"
+#include "media/base/media_log.h"
#include "media/formats/mp4/aac.h"
#include "media/formats/mp4/avc.h"
#include "media/formats/mp4/box_reader.h"
@@ -32,9 +33,9 @@ enum SampleFlags {
#define DECLARE_BOX_METHODS(T) \
T(); \
- virtual ~T(); \
- virtual bool Parse(BoxReader* reader) override; \
- virtual FourCC BoxType() const override; \
+ ~T() override; \
+ bool Parse(BoxReader* reader) override; \
+ FourCC BoxType() const override;
struct MEDIA_EXPORT FileType : Box {
DECLARE_BOX_METHODS(FileType);
diff --git a/chromium/media/formats/mp4/box_reader.cc b/chromium/media/formats/mp4/box_reader.cc
index fd81d137511..5105726da5d 100644
--- a/chromium/media/formats/mp4/box_reader.cc
+++ b/chromium/media/formats/mp4/box_reader.cc
@@ -6,13 +6,10 @@
#include <string.h>
#include <algorithm>
-#include <map>
#include <set>
-#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "media/formats/mp4/box_definitions.h"
-#include "media/formats/mp4/rcheck.h"
namespace media {
namespace mp4 {
@@ -160,8 +157,8 @@ bool BoxReader::IsValidTopLevelBox(const FourCC& type,
return true;
default:
// Hex is used to show nonprintable characters and aid in debugging
- MEDIA_LOG(log_cb) << "Unrecognized top-level box type "
- << FourCCToString(type);
+ MEDIA_LOG(DEBUG, log_cb) << "Unrecognized top-level box type "
+ << FourCCToString(type);
return false;
}
}
@@ -222,7 +219,8 @@ bool BoxReader::ReadHeader(bool* err) {
CHECK(Read4Into8(&size) && ReadFourCC(&type_));
if (size == 0) {
- // Media Source specific: we do not support boxes that run to EOS.
+ MEDIA_LOG(DEBUG, log_cb_) << "Media Source Extensions do not support ISO "
+ "BMFF boxes that run to EOS";
*err = true;
return false;
} else if (size == 1) {
diff --git a/chromium/media/formats/mp4/box_reader.h b/chromium/media/formats/mp4/box_reader.h
index 3360204ed54..6b593616124 100644
--- a/chromium/media/formats/mp4/box_reader.h
+++ b/chromium/media/formats/mp4/box_reader.h
@@ -22,7 +22,10 @@ class BoxReader;
struct MEDIA_EXPORT Box {
virtual ~Box();
+
+ // Parse errors may be logged using the BoxReader's log callback.
virtual bool Parse(BoxReader* reader) = 0;
+
virtual FourCC BoxType() const = 0;
};
diff --git a/chromium/media/formats/mp4/box_reader_unittest.cc b/chromium/media/formats/mp4/box_reader_unittest.cc
index 72a7765fdf8..62284e8a52b 100644
--- a/chromium/media/formats/mp4/box_reader_unittest.cc
+++ b/chromium/media/formats/mp4/box_reader_unittest.cc
@@ -31,19 +31,19 @@ static const uint8 kSkipBox[] = {
0x00 };
struct FreeBox : Box {
- virtual bool Parse(BoxReader* reader) override {
+ bool Parse(BoxReader* reader) override {
return true;
}
- virtual FourCC BoxType() const override { return FOURCC_FREE; }
+ FourCC BoxType() const override { return FOURCC_FREE; }
};
struct PsshBox : Box {
uint32 val;
- virtual bool Parse(BoxReader* reader) override {
+ bool Parse(BoxReader* reader) override {
return reader->Read4(&val);
}
- virtual FourCC BoxType() const override { return FOURCC_PSSH; }
+ FourCC BoxType() const override { return FOURCC_PSSH; }
};
struct SkipBox : Box {
@@ -55,7 +55,7 @@ struct SkipBox : Box {
std::vector<PsshBox> kids;
FreeBox mpty;
- virtual bool Parse(BoxReader* reader) override {
+ bool Parse(BoxReader* reader) override {
RCHECK(reader->ReadFullBoxHeader() &&
reader->Read1(&a) &&
reader->Read1(&b) &&
@@ -66,10 +66,10 @@ struct SkipBox : Box {
reader->ReadChildren(&kids) &&
reader->MaybeReadChild(&mpty);
}
- virtual FourCC BoxType() const override { return FOURCC_SKIP; }
+ FourCC BoxType() const override { return FOURCC_SKIP; }
SkipBox();
- virtual ~SkipBox();
+ ~SkipBox() override;
};
SkipBox::SkipBox() {}
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.cc b/chromium/media/formats/mp4/mp4_stream_parser.cc
index c6163bf35b6..5c2fcb9e30d 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser.cc
@@ -4,7 +4,6 @@
#include "media/formats/mp4/mp4_stream_parser.h"
-#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/time/time.h"
@@ -22,8 +21,6 @@
namespace media {
namespace mp4 {
-static const char kCencInitDataType[] = "cenc";
-
MP4StreamParser::MP4StreamParser(const std::set<int>& audio_object_types,
bool has_sbr)
: state_(kWaitingForInit),
@@ -42,27 +39,28 @@ MP4StreamParser::MP4StreamParser(const std::set<int>& audio_object_types,
MP4StreamParser::~MP4StreamParser() {}
-void MP4StreamParser::Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool /* ignore_text_tracks */ ,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+void MP4StreamParser::Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool /* ignore_text_tracks */,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
DCHECK_EQ(state_, kWaitingForInit);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
- DCHECK(!need_key_cb.is_null());
+ DCHECK(!encrypted_media_init_data_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
ChangeState(kParsingBoxes);
init_cb_ = init_cb;
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
- need_key_cb_ = need_key_cb;
+ encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
@@ -161,8 +159,8 @@ bool MP4StreamParser::ParseBox(bool* err) {
// before the head of the 'moof', so keeping this box around is sufficient.)
return !(*err);
} else {
- MEDIA_LOG(log_cb_) << "Skipping unrecognized top-level box: "
- << FourCCToString(reader->type());
+ MEDIA_LOG(DEBUG, log_cb_) << "Skipping unrecognized top-level box: "
+ << FourCCToString(reader->type());
}
queue_.Pop(reader->size());
@@ -216,17 +214,18 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
if (!(entry.format == FOURCC_MP4A ||
(entry.format == FOURCC_ENCA &&
entry.sinf.format.format == FOURCC_MP4A))) {
- MEDIA_LOG(log_cb_) << "Unsupported audio format 0x"
- << std::hex << entry.format << " in stsd box.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio format 0x" << std::hex
+ << entry.format << " in stsd box.";
return false;
}
uint8 audio_type = entry.esds.object_type;
DVLOG(1) << "audio_type " << std::hex << static_cast<int>(audio_type);
if (audio_object_types_.find(audio_type) == audio_object_types_.end()) {
- MEDIA_LOG(log_cb_) << "audio object type 0x" << std::hex << audio_type
- << " does not match what is specified in the"
- << " mimetype.";
+ MEDIA_LOG(ERROR, log_cb_) << "audio object type 0x" << std::hex
+ << audio_type
+ << " does not match what is specified in the"
+ << " mimetype.";
return false;
}
@@ -244,8 +243,8 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
extra_data = aac.codec_specific_data();
#endif
} else {
- MEDIA_LOG(log_cb_) << "Unsupported audio object type 0x" << std::hex
- << audio_type << " in esds.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio object type 0x"
+ << std::hex << audio_type << " in esds.";
return false;
}
@@ -278,8 +277,8 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
const VideoSampleEntry& entry = samp_descr.video_entries[desc_idx];
if (!entry.IsFormatValid()) {
- MEDIA_LOG(log_cb_) << "Unsupported video format 0x"
- << std::hex << entry.format << " in stsd box.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported video format 0x" << std::hex
+ << entry.format << " in stsd box.";
return false;
}
@@ -314,9 +313,11 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
}
if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(true, params);
+ base::ResetAndReturn(&init_cb_).Run(params);
+
+ if (!moov_->pssh.empty())
+ OnEncryptedMediaInitData(moov_->pssh);
- EmitNeedKeyIfNecessary(moov_->pssh);
return true;
}
@@ -328,20 +329,20 @@ bool MP4StreamParser::ParseMoof(BoxReader* reader) {
runs_.reset(new TrackRunIterator(moov_.get(), log_cb_));
RCHECK(runs_->Init(moof));
RCHECK(ComputeHighestEndOffset(moof));
- EmitNeedKeyIfNecessary(moof.pssh);
+
+ if (!moof.pssh.empty())
+ OnEncryptedMediaInitData(moof.pssh);
+
new_segment_cb_.Run();
ChangeState(kWaitingForSampleData);
return true;
}
-void MP4StreamParser::EmitNeedKeyIfNecessary(
+void MP4StreamParser::OnEncryptedMediaInitData(
const std::vector<ProtectionSystemSpecificHeader>& headers) {
// TODO(strobe): ensure that the value of init_data (all PSSH headers
// concatenated in arbitrary order) matches the EME spec.
// See https://www.w3.org/Bugs/Public/show_bug.cgi?id=17673.
- if (headers.empty())
- return;
-
size_t total_size = 0;
for (size_t i = 0; i < headers.size(); i++)
total_size += headers[i].raw_box.size();
@@ -353,7 +354,7 @@ void MP4StreamParser::EmitNeedKeyIfNecessary(
headers[i].raw_box.size());
pos += headers[i].raw_box.size();
}
- need_key_cb_.Run(kCencInitDataType, init_data);
+ encrypted_media_init_data_cb_.Run(EmeInitDataType::CENC, init_data);
}
bool MP4StreamParser::PrepareAVCBuffer(
@@ -395,10 +396,8 @@ bool MP4StreamParser::PrepareAACBuffer(
// As above, adjust subsample information to account for the headers. AAC is
// not required to use subsample encryption, so we may need to add an entry.
if (subsamples->empty()) {
- SubsampleEntry entry;
- entry.clear_bytes = kADTSHeaderMinSize;
- entry.cypher_bytes = frame_buf->size() - kADTSHeaderMinSize;
- subsamples->push_back(entry);
+ subsamples->push_back(SubsampleEntry(
+ kADTSHeaderMinSize, frame_buf->size() - kADTSHeaderMinSize));
} else {
(*subsamples)[0].clear_bytes += kADTSHeaderMinSize;
}
@@ -480,7 +479,7 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
if (video) {
if (!PrepareAVCBuffer(runs_->video_description().avcc,
&frame_buf, &subsamples)) {
- MEDIA_LOG(log_cb_) << "Failed to prepare AVC sample for decode";
+ MEDIA_LOG(ERROR, log_cb_) << "Failed to prepare AVC sample for decode";
*err = true;
return false;
}
@@ -490,7 +489,7 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
if (ESDescriptor::IsAAC(runs_->audio_description().esds.object_type) &&
!PrepareAACBuffer(runs_->audio_description().esds.aac,
&frame_buf, &subsamples)) {
- MEDIA_LOG(log_cb_) << "Failed to prepare AAC sample for decode";
+ MEDIA_LOG(ERROR, log_cb_) << "Failed to prepare AAC sample for decode";
*err = true;
return false;
}
@@ -581,8 +580,8 @@ bool MP4StreamParser::ReadAndDiscardMDATsUntil(int64 max_clear_offset) {
break;
if (type != FOURCC_MDAT) {
- MEDIA_LOG(log_cb_) << "Unexpected box type while parsing MDATs: "
- << FourCCToString(type);
+ MEDIA_LOG(DEBUG, log_cb_) << "Unexpected box type while parsing MDATs: "
+ << FourCCToString(type);
}
mdat_tail_ += box_sz;
}
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.h b/chromium/media/formats/mp4/mp4_stream_parser.h
index b8439bc6bae..376020913e3 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.h
+++ b/chromium/media/formats/mp4/mp4_stream_parser.h
@@ -26,17 +26,19 @@ class BoxReader;
class MEDIA_EXPORT MP4StreamParser : public StreamParser {
public:
MP4StreamParser(const std::set<int>& audio_object_types, bool has_sbr);
- virtual ~MP4StreamParser();
-
- virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
- virtual void Flush() override;
- virtual bool Parse(const uint8* buf, int size) override;
+ ~MP4StreamParser() override;
+
+ void Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) override;
+ void Flush() override;
+ bool Parse(const uint8* buf, int size) override;
private:
enum State {
@@ -51,7 +53,7 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
bool ParseMoov(mp4::BoxReader* reader);
bool ParseMoof(mp4::BoxReader* reader);
- void EmitNeedKeyIfNecessary(
+ void OnEncryptedMediaInitData(
const std::vector<ProtectionSystemSpecificHeader>& headers);
// To retain proper framing, each 'mdat' atom must be read; to limit memory
@@ -94,7 +96,7 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
- NeedKeyCB need_key_cb_;
+ EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
LogCB log_cb_;
diff --git a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
index 5bcb7add29f..8ebc1247828 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -25,8 +25,6 @@ using base::TimeDelta;
namespace media {
namespace mp4 {
-static const char kCencInitDataType[] = "cenc";
-
class MP4StreamParserTest : public testing::Test {
public:
MP4StreamParserTest()
@@ -60,9 +58,8 @@ class MP4StreamParserTest : public testing::Test {
return true;
}
- void InitF(bool init_ok, const StreamParser::InitParameters& params) {
- DVLOG(1) << "InitF: ok=" << init_ok
- << ", dur=" << params.duration.InMilliseconds()
+ void InitF(const StreamParser::InitParameters& params) {
+ DVLOG(1) << "InitF: dur=" << params.duration.InMilliseconds()
<< ", autoTimestampOffset=" << params.auto_update_timestamp_offset;
}
@@ -118,10 +115,9 @@ class MP4StreamParserTest : public testing::Test {
return true;
}
- void KeyNeededF(const std::string& type,
- const std::vector<uint8>& init_data) {
+ void KeyNeededF(EmeInitDataType type, const std::vector<uint8>& init_data) {
DVLOG(1) << "KeyNeededF: " << init_data.size();
- EXPECT_EQ(kCencInitDataType, type);
+ EXPECT_EQ(EmeInitDataType::CENC, type);
EXPECT_FALSE(init_data.empty());
}
diff --git a/chromium/media/formats/mp4/rcheck.h b/chromium/media/formats/mp4/rcheck.h
index fb0f8f27d4e..d7564874d86 100644
--- a/chromium/media/formats/mp4/rcheck.h
+++ b/chromium/media/formats/mp4/rcheck.h
@@ -6,13 +6,25 @@
#define MEDIA_FORMATS_MP4_RCHECK_H_
#include "base/logging.h"
+#include "media/base/media_log.h"
-#define RCHECK(x) \
- do { \
- if (!(x)) { \
- DLOG(ERROR) << "Failure while parsing MP4: " << #x; \
- return false; \
- } \
- } while (0)
+#define RCHECK_MEDIA_LOGGED(condition, log_cb, msg) \
+ do { \
+ if (!(condition)) { \
+ DLOG(ERROR) << "Failure while parsing MP4: " #condition; \
+ MEDIA_LOG(ERROR, log_cb) << "Failure parsing MP4: " << (msg); \
+ return false; \
+ } \
+ } while (0)
+
+// TODO(wolenetz,chcunningham): Where appropriate, replace usage of this macro
+// in favor of RCHECK_MEDIA_LOGGED. See https://crbug.com/487410.
+#define RCHECK(condition) \
+ do { \
+ if (!(condition)) { \
+ DLOG(ERROR) << "Failure while parsing MP4: " #condition; \
+ return false; \
+ } \
+ } while (0)
#endif // MEDIA_FORMATS_MP4_RCHECK_H_
diff --git a/chromium/media/formats/mp4/track_run_iterator.cc b/chromium/media/formats/mp4/track_run_iterator.cc
index 604074ca727..a216ba8ccf0 100644
--- a/chromium/media/formats/mp4/track_run_iterator.cc
+++ b/chromium/media/formats/mp4/track_run_iterator.cc
@@ -157,7 +157,8 @@ static bool PopulateSampleInfo(const TrackExtends& trex,
break;
case kSampleDependsOnReserved:
- MEDIA_LOG(log_cb) << "Reserved value used in sample dependency info.";
+ MEDIA_LOG(ERROR, log_cb) << "Reserved value used in sample dependency"
+ " info.";
return false;
}
return true;
@@ -518,7 +519,7 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
if (cenc_info_.empty()) {
DCHECK_EQ(0, aux_info_size());
- MEDIA_LOG(log_cb_) << "Aux Info is not available.";
+ MEDIA_LOG(ERROR, log_cb_) << "Aux Info is not available.";
return scoped_ptr<DecryptConfig>();
}
@@ -530,7 +531,7 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
if (!cenc_info.subsamples.empty() &&
(!cenc_info.GetTotalSizeOfSubsamples(&total_size) ||
total_size != static_cast<size_t>(sample_size()))) {
- MEDIA_LOG(log_cb_) << "Incorrect CENC subsample size.";
+ MEDIA_LOG(ERROR, log_cb_) << "Incorrect CENC subsample size.";
return scoped_ptr<DecryptConfig>();
}
diff --git a/chromium/media/formats/mpeg/adts_stream_parser.cc b/chromium/media/formats/mpeg/adts_stream_parser.cc
index beb94350c9c..ea0f5923096 100644
--- a/chromium/media/formats/mpeg/adts_stream_parser.cc
+++ b/chromium/media/formats/mpeg/adts_stream_parser.cc
@@ -69,12 +69,11 @@ int ADTSStreamParser::ParseFrameHeader(const uint8* data,
if (sync != 0xfff || layer != 0 || frame_length < bytes_read ||
sample_rate_index >= kADTSFrequencyTableSize ||
channel_layout_index >= kADTSChannelLayoutTableSize) {
- MEDIA_LOG(log_cb()) << "Invalid header data :" << std::hex
- << " sync 0x" << sync
- << " version 0x" << version
- << " layer 0x" << layer
- << " sample_rate_index 0x" << sample_rate_index
- << " channel_layout_index 0x" << channel_layout_index;
+ MEDIA_LOG(DEBUG, log_cb())
+ << "Invalid header data :" << std::hex << " sync 0x" << sync
+ << " version 0x" << version << " layer 0x" << layer
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_layout_index 0x" << channel_layout_index;
return -1;
}
diff --git a/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc b/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc
index b9eb0d9c6c5..27b5e4f2edb 100644
--- a/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc
+++ b/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc
@@ -11,9 +11,7 @@ namespace media {
class ADTSStreamParserTest : public StreamParserTestBase, public testing::Test {
public:
ADTSStreamParserTest()
- : StreamParserTestBase(
- scoped_ptr<StreamParser>(new ADTSStreamParser()).Pass()) {}
- virtual ~ADTSStreamParserTest() {}
+ : StreamParserTestBase(make_scoped_ptr(new ADTSStreamParser())) {}
};
// Test parsing with small prime sized chunks to smoke out "power of
diff --git a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
index 92bd4ce104e..204ee732fce 100644
--- a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
+++ b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
@@ -128,31 +128,28 @@ bool MPEG1AudioStreamParser::ParseHeader(
layer == kLayerReserved ||
bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
sample_rate_index == kSampleRateReserved) {
- MEDIA_LOG(log_cb) << "Invalid header data :" << std::hex
- << " sync 0x" << sync
- << " version 0x" << version
- << " layer 0x" << layer
- << " bitrate_index 0x" << bitrate_index
- << " sample_rate_index 0x" << sample_rate_index
- << " channel_mode 0x" << channel_mode;
+ MEDIA_LOG(ERROR, log_cb)
+ << "Invalid header data :" << std::hex << " sync 0x" << sync
+ << " version 0x" << version << " layer 0x" << layer
+ << " bitrate_index 0x" << bitrate_index << " sample_rate_index 0x"
+ << sample_rate_index << " channel_mode 0x" << channel_mode;
return false;
}
if (layer == kLayer2 && kIsAllowed[bitrate_index][channel_mode]) {
- MEDIA_LOG(log_cb) << "Invalid (bitrate_index, channel_mode) combination :"
- << std::hex
- << " bitrate_index " << bitrate_index
- << " channel_mode " << channel_mode;
+ MEDIA_LOG(ERROR, log_cb) << "Invalid (bitrate_index, channel_mode)"
+ << " combination :" << std::hex
+ << " bitrate_index " << bitrate_index
+ << " channel_mode " << channel_mode;
return false;
}
int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
if (bitrate == 0) {
- MEDIA_LOG(log_cb) << "Invalid bitrate :" << std::hex
- << " version " << version
- << " layer " << layer
- << " bitrate_index " << bitrate_index;
+ MEDIA_LOG(ERROR, log_cb) << "Invalid bitrate :" << std::hex << " version "
+ << version << " layer " << layer
+ << " bitrate_index " << bitrate_index;
return false;
}
@@ -160,9 +157,9 @@ bool MPEG1AudioStreamParser::ParseHeader(
int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
if (frame_sample_rate == 0) {
- MEDIA_LOG(log_cb) << "Invalid sample rate :" << std::hex
- << " version " << version
- << " sample_rate_index " << sample_rate_index;
+ MEDIA_LOG(ERROR, log_cb) << "Invalid sample rate :" << std::hex
+ << " version " << version << " sample_rate_index "
+ << sample_rate_index;
return false;
}
header->sample_rate = frame_sample_rate;
@@ -278,7 +275,7 @@ int MPEG1AudioStreamParser::ParseFrameHeader(const uint8* data,
// Check to see if the tag contains 'Xing' or 'Info'
if (tag == 0x496e666f || tag == 0x58696e67) {
- MEDIA_LOG(log_cb()) << "Skipping XING header.";
+ MEDIA_LOG(DEBUG, log_cb()) << "Skipping XING header.";
if (metadata_frame)
*metadata_frame = true;
return header_bytes_read + reader.bits_read() / 8;
diff --git a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser_unittest.cc b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser_unittest.cc
index 1c245161303..b247acb42cf 100644
--- a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser_unittest.cc
+++ b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser_unittest.cc
@@ -13,9 +13,7 @@ class MPEG1AudioStreamParserTest
: public StreamParserTestBase, public testing::Test {
public:
MPEG1AudioStreamParserTest()
- : StreamParserTestBase(
- scoped_ptr<StreamParser>(new MPEG1AudioStreamParser()).Pass()) {}
- virtual ~MPEG1AudioStreamParserTest() {}
+ : StreamParserTestBase(make_scoped_ptr(new MPEG1AudioStreamParser())) {}
};
// Test parsing with small prime sized chunks to smoke out "power of
diff --git a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
index 89c1b622861..14fe143c791 100644
--- a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
@@ -54,14 +54,15 @@ MPEGAudioStreamParserBase::MPEGAudioStreamParserBase(uint32 start_code_mask,
MPEGAudioStreamParserBase::~MPEGAudioStreamParserBase() {}
-void MPEGAudioStreamParserBase::Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+void MPEGAudioStreamParserBase::Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, UNINITIALIZED);
init_cb_ = init_cb;
@@ -220,16 +221,14 @@ int MPEGAudioStreamParserBase::ParseFrame(const uint8* data,
timestamp_helper_->SetBaseTimestamp(base_timestamp);
VideoDecoderConfig video_config;
- bool success = config_cb_.Run(config_, video_config, TextTrackConfigMap());
+ if (!config_cb_.Run(config_, video_config, TextTrackConfigMap()))
+ return -1;
if (!init_cb_.is_null()) {
InitParameters params(kInfiniteDuration());
params.auto_update_timestamp_offset = true;
- base::ResetAndReturn(&init_cb_).Run(success, params);
+ base::ResetAndReturn(&init_cb_).Run(params);
}
-
- if (!success)
- return -1;
}
if (metadata_frame)
@@ -263,7 +262,7 @@ int MPEGAudioStreamParserBase::ParseIcecastHeader(const uint8* data, int size) {
int offset = LocateEndOfHeaders(data, locate_size, 4);
if (offset < 0) {
if (locate_size == kMaxIcecastHeaderSize) {
- MEDIA_LOG(log_cb_) << "Icecast header is too large.";
+ MEDIA_LOG(ERROR, log_cb_) << "Icecast header is too large.";
return -1;
}
@@ -324,7 +323,7 @@ bool MPEGAudioStreamParserBase::ParseSyncSafeInt(BitReader* reader,
for (int i = 0; i < 4; ++i) {
uint8 tmp;
if (!reader->ReadBits(1, &tmp) || tmp != 0) {
- MEDIA_LOG(log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
+ MEDIA_LOG(ERROR, log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
return false;
}
diff --git a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
index fc736c2bb85..2443322fdd9 100644
--- a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
@@ -35,7 +35,7 @@ class MEDIA_EXPORT MPEGAudioStreamParserBase : public StreamParser {
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) override;
diff --git a/chromium/media/formats/webm/opus_packet_builder.cc b/chromium/media/formats/webm/opus_packet_builder.cc
new file mode 100644
index 00000000000..b2dd8dca1e1
--- /dev/null
+++ b/chromium/media/formats/webm/opus_packet_builder.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/formats/webm/opus_packet_builder.h"
+#include "media/formats/webm/webm_cluster_parser.h"
+
+namespace media {
+
+OpusPacket::OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR) {
+ DCHECK_GE(config, 0);
+ DCHECK_LT(config, kNumPossibleOpusConfigs);
+ DCHECK_GE(frame_count, kMinOpusPacketFrameCount);
+ DCHECK_LE(frame_count, kMaxOpusPacketFrameCount);
+
+ duration_ms_ = frame_count *
+ WebMClusterParser::kOpusFrameDurationsMu[config] /
+ static_cast<float>(1000);
+
+ uint8_t frame_count_code;
+ uint8_t frame_count_byte;
+
+ if (frame_count == 1) {
+ frame_count_code = 0;
+ } else if (frame_count == 2) {
+ frame_count_code = is_VBR ? 2 : 1;
+ } else {
+ frame_count_code = 3;
+ frame_count_byte = (is_VBR ? 1 << 7 : 0) | frame_count;
+ }
+
+ // All opus packets must have TOC byte.
+ uint8_t opus_toc_byte = (config << 3) | frame_count_code;
+ data_.push_back(opus_toc_byte);
+
+ // For code 3 packets, the number of frames is signaled in the "frame
+ // count byte".
+ if (frame_count_code == 3) {
+ data_.push_back(frame_count_byte);
+ }
+
+ // Packet will only conform to layout specification for the TOC byte
+ // and optional frame count bytes appended above. This last byte
+ // is purely dummy padding where frame size data or encoded data might
+ // otherwise start.
+ data_.push_back(static_cast<uint8_t>(0));
+}
+
+OpusPacket::~OpusPacket() {
+}
+
+const uint8_t* OpusPacket::data() const {
+ return &(data_[0]);
+}
+
+int OpusPacket::size() const {
+ return data_.size();
+}
+
+double OpusPacket::duration_ms() const {
+ return duration_ms_;
+}
+
+ScopedVector<OpusPacket> BuildAllOpusPackets() {
+ ScopedVector<OpusPacket> opus_packets;
+
+ for (int frame_count = kMinOpusPacketFrameCount;
+ frame_count <= kMaxOpusPacketFrameCount; frame_count++) {
+ for (int opus_config_num = 0; opus_config_num < kNumPossibleOpusConfigs;
+ opus_config_num++) {
+ bool is_VBR = false;
+ opus_packets.push_back(
+ new OpusPacket(opus_config_num, frame_count, is_VBR));
+
+ if (frame_count >= 2) {
+ // Add another packet with VBR flag toggled. For frame counts >= 2,
+ // VBR triggers changes to packet framing.
+ is_VBR = true;
+ opus_packets.push_back(
+ new OpusPacket(opus_config_num, frame_count, is_VBR));
+ }
+ }
+ }
+
+ return opus_packets.Pass();
+}
+
+} // namespace media
diff --git a/chromium/media/formats/webm/opus_packet_builder.h b/chromium/media/formats/webm/opus_packet_builder.h
new file mode 100644
index 00000000000..af76b49a398
--- /dev/null
+++ b/chromium/media/formats/webm/opus_packet_builder.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
+#define MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
+
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+
+namespace media {
+
+// From Opus RFC. See https://tools.ietf.org/html/rfc6716#page-14
+enum OpusConstants {
+ kNumPossibleOpusConfigs = 32,
+ kMinOpusPacketFrameCount = 1,
+ kMaxOpusPacketFrameCount = 48
+};
+
+class OpusPacket {
+ public:
+ OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR);
+ ~OpusPacket();
+
+ const uint8_t* data() const;
+ int size() const;
+ double duration_ms() const;
+
+ private:
+ std::vector<uint8_t> data_;
+ double duration_ms_;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusPacket);
+};
+
+// Builds an exhaustive collection of Opus packet configurations.
+ScopedVector<OpusPacket> BuildAllOpusPackets();
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
diff --git a/chromium/media/formats/webm/webm_audio_client.cc b/chromium/media/formats/webm/webm_audio_client.cc
index 92bb40f0ace..ea911ec290e 100644
--- a/chromium/media/formats/webm/webm_audio_client.cc
+++ b/chromium/media/formats/webm/webm_audio_client.cc
@@ -37,7 +37,7 @@ bool WebMAudioClient::InitializeConfig(
} else if (codec_id == "A_OPUS") {
audio_codec = kCodecOpus;
} else {
- MEDIA_LOG(log_cb_) << "Unsupported audio codec_id " << codec_id;
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio codec_id " << codec_id;
return false;
}
@@ -51,7 +51,7 @@ bool WebMAudioClient::InitializeConfig(
ChannelLayout channel_layout = GuessChannelLayout(channels_);
if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
- MEDIA_LOG(log_cb_) << "Unsupported channel count " << channels_;
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported channel count " << channels_;
return false;
}
@@ -100,9 +100,9 @@ bool WebMAudioClient::InitializeConfig(
bool WebMAudioClient::OnUInt(int id, int64 val) {
if (id == kWebMIdChannels) {
if (channels_ != -1) {
- MEDIA_LOG(log_cb_) << "Multiple values for id " << std::hex << id
- << " specified. (" << channels_ << " and " << val
- << ")";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
+ << " specified. (" << channels_ << " and "
+ << val << ")";
return false;
}
@@ -129,8 +129,9 @@ bool WebMAudioClient::OnFloat(int id, double val) {
return false;
if (*dst != -1) {
- MEDIA_LOG(log_cb_) << "Multiple values for id " << std::hex << id
- << " specified (" << *dst << " and " << val << ")";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
+ << " specified (" << *dst << " and " << val
+ << ")";
return false;
}
diff --git a/chromium/media/formats/webm/webm_cluster_parser.cc b/chromium/media/formats/webm/webm_cluster_parser.cc
index 6cb51c82238..a76559d241a 100644
--- a/chromium/media/formats/webm/webm_cluster_parser.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser.cc
@@ -17,6 +17,20 @@
namespace media {
+const uint16_t WebMClusterParser::kOpusFrameDurationsMu[] = {
+ 10000, 20000, 40000, 60000, 10000, 20000, 40000, 60000, 10000, 20000, 40000,
+ 60000, 10000, 20000, 10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000,
+ 10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000, 10000, 20000};
+
+enum {
+ // Limits the number of MEDIA_LOG() calls in the path of reading encoded
+ // duration to avoid spamming for corrupted data.
+ kMaxDurationErrorLogs = 10,
+ // Limits the number of MEDIA_LOG() calls warning the user that buffer
+ // durations have been estimated.
+ kMaxDurationEstimateLogs = 10,
+};
+
WebMClusterParser::WebMClusterParser(
int64 timecode_scale,
int audio_track_num,
@@ -27,17 +41,20 @@ WebMClusterParser::WebMClusterParser(
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
+ const AudioCodec audio_codec,
const LogCB& log_cb)
- : timecode_multiplier_(timecode_scale / 1000.0),
+ : num_duration_errors_(0),
+ timecode_multiplier_(timecode_scale / 1000.0),
ignored_tracks_(ignored_tracks),
audio_encryption_key_id_(audio_encryption_key_id),
video_encryption_key_id_(video_encryption_key_id),
+ audio_codec_(audio_codec),
parser_(kWebMIdCluster, this),
last_block_timecode_(-1),
block_data_size_(-1),
block_duration_(-1),
block_add_id_(-1),
- block_additional_data_size_(-1),
+ block_additional_data_size_(0),
discard_padding_(-1),
cluster_timecode_(-1),
cluster_start_time_(kNoTimestamp()),
@@ -68,7 +85,7 @@ void WebMClusterParser::Reset() {
ready_buffer_upper_bound_ = kNoDecodeTimestamp();
}
-int WebMClusterParser::Parse(const uint8* buf, int size) {
+int WebMClusterParser::Parse(const uint8_t* buf, int size) {
audio_.ClearReadyBuffers();
video_.ClearReadyBuffers();
ClearTextTrackReadyBuffers();
@@ -140,6 +157,106 @@ WebMClusterParser::GetTextBuffers() {
return text_buffers_map_;
}
+base::TimeDelta WebMClusterParser::TryGetEncodedAudioDuration(
+ const uint8_t* data,
+ int size) {
+
+ // Duration is currently read assuming the *entire* stream is unencrypted.
+ // The special "Signal Byte" prepended to Blocks in encrypted streams is
+ // assumed to not be present.
+ // TODO(chcunningham): Consider parsing "Signal Byte" for encrypted streams
+ // to return duration for any unencrypted blocks.
+
+ if (audio_codec_ == kCodecOpus) {
+ return ReadOpusDuration(data, size);
+ }
+
+ // TODO(wolenetz/chcunningham): Implement duration reading for Vorbis. See
+ // motivations in http://crbug.com/396634.
+
+ return kNoTimestamp();
+}
+
+base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
+ int size) {
+ // Masks and constants for Opus packets. See
+ // https://tools.ietf.org/html/rfc6716#page-14
+ static const uint8_t kTocConfigMask = 0xf8;
+ static const uint8_t kTocFrameCountCodeMask = 0x03;
+ static const uint8_t kFrameCountMask = 0x3f;
+ static const base::TimeDelta kPacketDurationMax =
+ base::TimeDelta::FromMilliseconds(120);
+
+ if (size < 1) {
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "Invalid zero-byte Opus packet; demuxed block duration may be "
+ "imprecise.";
+ return kNoTimestamp();
+ }
+
+ // Frame count type described by last 2 bits of Opus TOC byte.
+ int frame_count_type = data[0] & kTocFrameCountCodeMask;
+
+ int frame_count = 0;
+ switch (frame_count_type) {
+ case 0:
+ frame_count = 1;
+ break;
+ case 1:
+ case 2:
+ frame_count = 2;
+ break;
+ case 3:
+ // Type 3 indicates an arbitrary frame count described in the next byte.
+ if (size < 2) {
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "Second byte missing from 'Code 3' Opus packet; demuxed block "
+ "duration may be imprecise.";
+ return kNoTimestamp();
+ }
+
+ frame_count = data[1] & kFrameCountMask;
+
+ if (frame_count == 0) {
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "Illegal 'Code 3' Opus packet with frame count zero; demuxed "
+ "block duration may be imprecise.";
+ return kNoTimestamp();
+ }
+
+ break;
+ default:
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "Unexpected Opus frame count type: " << frame_count_type << "; "
+ << "demuxed block duration may be imprecise.";
+ return kNoTimestamp();
+ }
+
+ int opusConfig = (data[0] & kTocConfigMask) >> 3;
+ CHECK_GE(opusConfig, 0);
+ CHECK_LT(opusConfig, static_cast<int>(arraysize(kOpusFrameDurationsMu)));
+
+ DCHECK_GT(frame_count, 0);
+ base::TimeDelta duration = base::TimeDelta::FromMicroseconds(
+ kOpusFrameDurationsMu[opusConfig] * frame_count);
+
+ if (duration > kPacketDurationMax) {
+ // Intentionally allowing packet to pass through for now. Decoder should
+ // either handle or fail gracefully. MEDIA_LOG as breadcrumbs in case
+ // things go sideways.
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "Warning, demuxed Opus packet with encoded duration: " << duration
+ << ". Should be no greater than " << kPacketDurationMax;
+ }
+
+ return duration;
+}
+
WebMParserClient* WebMClusterParser::OnListStart(int id) {
if (id == kWebMIdCluster) {
cluster_timecode_ = -1;
@@ -153,7 +270,7 @@ WebMParserClient* WebMClusterParser::OnListStart(int id) {
} else if (id == kWebMIdBlockAdditions) {
block_add_id_ = -1;
block_additional_data_.reset();
- block_additional_data_size_ = -1;
+ block_additional_data_size_ = 0;
}
return this;
@@ -165,7 +282,7 @@ bool WebMClusterParser::OnListEnd(int id) {
// Make sure the BlockGroup actually had a Block.
if (block_data_size_ == -1) {
- MEDIA_LOG(log_cb_) << "Block missing from BlockGroup.";
+ MEDIA_LOG(ERROR, log_cb_) << "Block missing from BlockGroup.";
return false;
}
@@ -178,7 +295,7 @@ bool WebMClusterParser::OnListEnd(int id) {
block_duration_ = -1;
block_add_id_ = -1;
block_additional_data_.reset();
- block_additional_data_size_ = -1;
+ block_additional_data_size_ = 0;
discard_padding_ = -1;
discard_padding_set_ = false;
return result;
@@ -205,9 +322,12 @@ bool WebMClusterParser::OnUInt(int id, int64 val) {
return true;
}
-bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
- int size, const uint8* additional,
- int additional_size, int duration,
+bool WebMClusterParser::ParseBlock(bool is_simple_block,
+ const uint8_t* buf,
+ int size,
+ const uint8_t* additional,
+ int additional_size,
+ int duration,
int64 discard_padding) {
if (size < 4)
return false;
@@ -215,7 +335,7 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
// Return an error if the trackNum > 127. We just aren't
// going to support large track numbers right now.
if (!(buf[0] & 0x80)) {
- MEDIA_LOG(log_cb_) << "TrackNumber over 127 not supported";
+ MEDIA_LOG(ERROR, log_cb_) << "TrackNumber over 127 not supported";
return false;
}
@@ -225,7 +345,8 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
int lacing = (flags >> 1) & 0x3;
if (lacing) {
- MEDIA_LOG(log_cb_) << "Lacing " << lacing << " is not supported yet.";
+ MEDIA_LOG(ERROR, log_cb_) << "Lacing " << lacing
+ << " is not supported yet.";
return false;
}
@@ -233,25 +354,25 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
if (timecode & 0x8000)
timecode |= ~0xffff;
- const uint8* frame_data = buf + 4;
+ const uint8_t* frame_data = buf + 4;
int frame_size = size - (frame_data - buf);
return OnBlock(is_simple_block, track_num, timecode, duration, flags,
frame_data, frame_size, additional, additional_size,
discard_padding);
}
-bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
+bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
switch (id) {
case kWebMIdSimpleBlock:
- return ParseBlock(true, data, size, NULL, -1, -1, 0);
+ return ParseBlock(true, data, size, NULL, 0, -1, 0);
case kWebMIdBlock:
if (block_data_) {
- MEDIA_LOG(log_cb_) << "More than 1 Block in a BlockGroup is not "
- "supported.";
+ MEDIA_LOG(ERROR, log_cb_) << "More than 1 Block in a BlockGroup is not "
+ "supported.";
return false;
}
- block_data_.reset(new uint8[size]);
+ block_data_.reset(new uint8_t[size]);
memcpy(block_data_.get(), data, size);
block_data_size_ = size;
return true;
@@ -263,15 +384,15 @@ bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
// as per matroska spec. But for now we don't have a use case to
// support parsing of such files. Take a look at this again when such a
// case arises.
- MEDIA_LOG(log_cb_) << "More than 1 BlockAdditional in a BlockGroup is "
- "not supported.";
+ MEDIA_LOG(ERROR, log_cb_) << "More than 1 BlockAdditional in a "
+ "BlockGroup is not supported.";
return false;
}
// First 8 bytes of side_data in DecoderBuffer is the BlockAddID
// element's value in Big Endian format. This is done to mimic ffmpeg
// demuxer's behavior.
block_additional_data_size_ = size + sizeof(block_add_id);
- block_additional_data_.reset(new uint8[block_additional_data_size_]);
+ block_additional_data_.reset(new uint8_t[block_additional_data_size_]);
memcpy(block_additional_data_.get(), &block_add_id,
sizeof(block_add_id));
memcpy(block_additional_data_.get() + 8, data, size);
@@ -294,29 +415,32 @@ bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
}
}
-bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
+bool WebMClusterParser::OnBlock(bool is_simple_block,
+ int track_num,
int timecode,
- int block_duration,
+ int block_duration,
int flags,
- const uint8* data, int size,
- const uint8* additional, int additional_size,
+ const uint8_t* data,
+ int size,
+ const uint8_t* additional,
+ int additional_size,
int64 discard_padding) {
DCHECK_GE(size, 0);
if (cluster_timecode_ == -1) {
- MEDIA_LOG(log_cb_) << "Got a block before cluster timecode.";
+ MEDIA_LOG(ERROR, log_cb_) << "Got a block before cluster timecode.";
return false;
}
// TODO(acolwell): Should relative negative timecode offsets be rejected? Or
// only when the absolute timecode is negative? See http://crbug.com/271794
if (timecode < 0) {
- MEDIA_LOG(log_cb_) << "Got a block with negative timecode offset "
- << timecode;
+ MEDIA_LOG(ERROR, log_cb_) << "Got a block with negative timecode offset "
+ << timecode;
return false;
}
if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
- MEDIA_LOG(log_cb_)
+ MEDIA_LOG(ERROR, log_cb_)
<< "Got a block with a timecode before the previous block.";
return false;
}
@@ -324,9 +448,13 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
Track* track = NULL;
StreamParserBuffer::Type buffer_type = DemuxerStream::AUDIO;
std::string encryption_key_id;
+ base::TimeDelta encoded_duration = kNoTimestamp();
if (track_num == audio_.track_num()) {
track = &audio_;
encryption_key_id = audio_encryption_key_id_;
+ if (encryption_key_id.empty()) {
+ encoded_duration = TryGetEncodedAudioDuration(data, size);
+ }
} else if (track_num == video_.track_num()) {
track = &video_;
encryption_key_id = video_encryption_key_id_;
@@ -341,7 +469,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
track = text_track;
buffer_type = DemuxerStream::TEXT;
} else {
- MEDIA_LOG(log_cb_) << "Unexpected track number " << track_num;
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected track number " << track_num;
return false;
}
@@ -367,7 +495,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
if (!encryption_key_id.empty() &&
!WebMCreateDecryptConfig(
data, size,
- reinterpret_cast<const uint8*>(encryption_key_id.data()),
+ reinterpret_cast<const uint8_t*>(encryption_key_id.data()),
encryption_key_id.size(),
&decrypt_config, &data_offset)) {
return false;
@@ -387,7 +515,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
std::string id, settings, content;
WebMWebVTTParser::Parse(data, size, &id, &settings, &content);
- std::vector<uint8> side_data;
+ std::vector<uint8_t> side_data;
MakeSideData(id.begin(), id.end(),
settings.begin(), settings.end(),
&side_data);
@@ -396,7 +524,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
// type with remapped bytestream track numbers and allow multiple tracks as
// applicable. See https://crbug.com/341581.
buffer = StreamParserBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(content.data()),
+ reinterpret_cast<const uint8_t*>(content.data()),
content.length(),
&side_data[0],
side_data.size(),
@@ -407,9 +535,48 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
if (cluster_start_time_ == kNoTimestamp())
cluster_start_time_ = timestamp;
+ base::TimeDelta block_duration_time_delta = kNoTimestamp();
if (block_duration >= 0) {
- buffer->set_duration(base::TimeDelta::FromMicroseconds(
- block_duration * timecode_multiplier_));
+ block_duration_time_delta = base::TimeDelta::FromMicroseconds(
+ block_duration * timecode_multiplier_);
+ }
+
+ // Prefer encoded duration over BlockGroup->BlockDuration or
+ // TrackEntry->DefaultDuration when available. This layering violation is a
+ // workaround for http://crbug.com/396634, decreasing the likelihood of
+ // fall-back to rough estimation techniques for Blocks that lack a
+ // BlockDuration at the end of a cluster. Cross cluster durations are not
+ // feasible given flexibility of cluster ordering and MSE APIs. Duration
+ // estimation may still apply in cases of encryption and codecs for which
+ // we do not extract encoded duration. Within a cluster, estimates are applied
+ // as Block Timecode deltas, or once the whole cluster is parsed in the case
+ // of the last Block in the cluster. See Track::AddBuffer and
+ // ApplyDurationEstimateIfNeeded().
+ if (encoded_duration != kNoTimestamp()) {
+ DCHECK(encoded_duration != kInfiniteDuration());
+ DCHECK(encoded_duration > base::TimeDelta());
+ buffer->set_duration(encoded_duration);
+
+ DVLOG(3) << __FUNCTION__ << " : "
+ << "Using encoded duration " << encoded_duration.InSecondsF();
+
+ if (block_duration_time_delta != kNoTimestamp()) {
+ base::TimeDelta duration_difference =
+ block_duration_time_delta - encoded_duration;
+
+ const auto kWarnDurationDiff =
+ base::TimeDelta::FromMicroseconds(timecode_multiplier_ * 2);
+ if (duration_difference.magnitude() > kWarnDurationDiff) {
+ LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ kMaxDurationErrorLogs)
+ << "BlockDuration "
+ << "(" << block_duration_time_delta << ") "
+ << "differs significantly from encoded duration "
+ << "(" << encoded_duration << ").";
+ }
+ }
+ } else if (block_duration_time_delta != kNoTimestamp()) {
+ buffer->set_duration(block_duration_time_delta);
} else {
DCHECK_NE(buffer_type, DemuxerStream::TEXT);
buffer->set_duration(track->default_duration());
@@ -428,7 +595,8 @@ WebMClusterParser::Track::Track(int track_num,
bool is_video,
base::TimeDelta default_duration,
const LogCB& log_cb)
- : track_num_(track_num),
+ : num_duration_estimates_(0),
+ track_num_(track_num),
is_video_(is_video),
default_duration_(default_duration),
estimated_next_frame_duration_(kNoTimestamp()),
@@ -486,7 +654,7 @@ bool WebMClusterParser::Track::AddBuffer(
DVLOG(2) << "AddBuffer() : " << track_num_
<< " ts " << buffer->timestamp().InSecondsF()
<< " dur " << buffer->duration().InSecondsF()
- << " kf " << buffer->IsKeyframe()
+ << " kf " << buffer->is_key_frame()
<< " size " << buffer->data_size();
if (last_added_buffer_missing_duration_.get()) {
@@ -499,7 +667,7 @@ bool WebMClusterParser::Track::AddBuffer(
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
- << " kf " << last_added_buffer_missing_duration_->IsKeyframe()
+ << " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
scoped_refptr<StreamParserBuffer> updated_buffer =
last_added_buffer_missing_duration_;
@@ -521,14 +689,29 @@ void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
if (!last_added_buffer_missing_duration_.get())
return;
- last_added_buffer_missing_duration_->set_duration(GetDurationEstimate());
+ base::TimeDelta estimated_duration = GetDurationEstimate();
+ last_added_buffer_missing_duration_->set_duration(estimated_duration);
+
+ if (is_video_) {
+ // Exposing estimation so splicing/overlap frame processing can make
+ // informed decisions downstream.
+ // TODO(chcunningham): Set this for audio as well in later change where
+ // audio is switched to max estimation and splicing is disabled.
+ last_added_buffer_missing_duration_->set_is_duration_estimated(true);
+ }
- DVLOG(2) << "ApplyDurationEstimateIfNeeded() : new dur : "
- << " ts "
+ LIMITED_MEDIA_LOG(INFO, log_cb_, num_duration_estimates_,
+ kMaxDurationEstimateLogs)
+ << "Estimating WebM block duration to be " << estimated_duration << " "
+ << "for the last (Simple)Block in the Cluster for this Track. Use "
+ << "BlockGroups with BlockDurations at the end of each Track in a "
+ << "Cluster to avoid estimation.";
+
+ DVLOG(2) << __FUNCTION__ << " new dur : ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
- << " kf " << last_added_buffer_missing_duration_->IsKeyframe()
+ << " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
// Don't use the applied duration as a future estimation (don't use
@@ -549,7 +732,7 @@ void WebMClusterParser::Track::Reset() {
last_added_buffer_missing_duration_ = NULL;
}
-bool WebMClusterParser::Track::IsKeyframe(const uint8* data, int size) const {
+bool WebMClusterParser::Track::IsKeyframe(const uint8_t* data, int size) const {
// For now, assume that all blocks are keyframes for datatypes other than
// video. This is a valid assumption for Vorbis, WebVTT, & Opus.
if (!is_video_)
@@ -585,20 +768,40 @@ bool WebMClusterParser::Track::QueueBuffer(
base::TimeDelta duration = buffer->duration();
if (duration < base::TimeDelta() || duration == kNoTimestamp()) {
- MEDIA_LOG(log_cb_) << "Invalid buffer duration: " << duration.InSecondsF();
+ MEDIA_LOG(ERROR, log_cb_)
+ << "Invalid buffer duration: " << duration.InSecondsF();
return false;
}
- // The estimated frame duration is the minimum non-zero duration since the
- // last initialization segment. The minimum is used to ensure frame durations
- // aren't overestimated.
+ // The estimated frame duration is the minimum (for audio) or the maximum
+ // (for video) non-zero duration since the last initialization segment. The
+ // minimum is used for audio to ensure frame durations aren't overestimated,
+ // triggering unnecessary frame splicing. For video, splicing does not apply,
+ // so maximum is used and overlap is simply resolved by showing the
+ // later of the overlapping frames at its given PTS, effectively trimming down
+ // the over-estimated duration of the previous frame.
+ // TODO(chcunningham): Use max for audio and disable splicing whenever
+ // estimated buffers are encountered.
if (duration > base::TimeDelta()) {
+ base::TimeDelta orig_duration_estimate = estimated_next_frame_duration_;
if (estimated_next_frame_duration_ == kNoTimestamp()) {
estimated_next_frame_duration_ = duration;
+ } else if (is_video_) {
+ estimated_next_frame_duration_ =
+ std::max(duration, estimated_next_frame_duration_);
} else {
estimated_next_frame_duration_ =
std::min(duration, estimated_next_frame_duration_);
}
+
+ if (orig_duration_estimate != estimated_next_frame_duration_) {
+ DVLOG(3) << "Updated duration estimate:"
+ << orig_duration_estimate
+ << " -> "
+ << estimated_next_frame_duration_
+ << " at timestamp: "
+ << buffer->GetDecodeTimestamp().InSecondsF();
+ }
}
buffers_.push_back(buffer);
diff --git a/chromium/media/formats/webm/webm_cluster_parser.h b/chromium/media/formats/webm/webm_cluster_parser.h
index 3b341e66b98..0eef9867376 100644
--- a/chromium/media/formats/webm/webm_cluster_parser.h
+++ b/chromium/media/formats/webm/webm_cluster_parser.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_decoder_config.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
#include "media/base/stream_parser.h"
@@ -26,15 +27,23 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
typedef std::map<TrackId, const BufferQueue> TextBufferQueueMap;
- // Arbitrarily-chosen numbers to estimate the duration of a buffer if none is
- // set and there is not enough information to get a better estimate.
- // TODO(wolenetz/acolwell): Parse audio codebook to determine missing audio
- // frame durations. See http://crbug.com/351166.
+ // Numbers chosen to estimate the duration of a buffer if none is set and
+ // there is not enough information to get a better estimate.
enum {
- kDefaultAudioBufferDurationInMs = 23, // Common 1k samples @44.1kHz
- kDefaultVideoBufferDurationInMs = 42 // Low 24fps to reduce stalls
+ // Common 1k samples @44.1kHz
+ kDefaultAudioBufferDurationInMs = 23,
+
+ // Chosen to represent 16fps duration, which will prevent MSE stalls in
+ // videos with frame-rates as low as 8fps.
+ kDefaultVideoBufferDurationInMs = 63
};
+ // Opus packets encode the duration and other parameters in the 5 most
+ // significant bits of the first byte. The index in this array corresponds
+ // to the duration of each frame of the packet in microseconds. See
+ // https://tools.ietf.org/html/rfc6716#page-14
+ static const uint16_t kOpusFrameDurationsMu[];
+
private:
// Helper class that manages per-track state.
class Track {
@@ -67,8 +76,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
// If |last_added_buffer_missing_duration_| is set, updates its duration to
- // be non-kNoTimestamp() value of |estimated_next_frame_duration_| or an
- // arbitrary default, then adds it to |buffers_| and unsets
+ // be non-kNoTimestamp() value of |estimated_next_frame_duration_| or a
+ // hard-coded default, then adds it to |buffers_| and unsets
// |last_added_buffer_missing_duration_|. (This method helps stream parser
// emit all buffers in a media segment before signaling end of segment.)
void ApplyDurationEstimateIfNeeded();
@@ -87,7 +96,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// block is a keyframe.
// |data| contains the bytes in the block.
// |size| indicates the number of bytes in |data|.
- bool IsKeyframe(const uint8* data, int size) const;
+ bool IsKeyframe(const uint8_t* data, int size) const;
base::TimeDelta default_duration() const { return default_duration_; }
@@ -102,6 +111,10 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// ApplyDurationEstimateIfNeeded().
base::TimeDelta GetDurationEstimate();
+ // Counts the number of estimated durations used in this track. Used to
+ // prevent log spam for MEDIA_LOG()s about estimated duration.
+ int num_duration_estimates_;
+
int track_num_;
bool is_video_;
@@ -124,8 +137,10 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
base::TimeDelta default_duration_;
// If kNoTimestamp(), then a default value will be used. This estimate is
- // the maximum duration seen or derived so far for this track, and is valid
- // only if |default_duration_| is kNoTimestamp().
+ // the maximum (for video), or minimum (for audio) duration seen so far for
+ // this track, and is used only if |default_duration_| is kNoTimestamp().
+ // TODO(chcunningham): Use maximum for audio too, adding checks to disable
+ // splicing when these estimates are observed in SourceBufferStream.
base::TimeDelta estimated_next_frame_duration_;
LogCB log_cb_;
@@ -143,6 +158,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
+ const AudioCodec audio_codec_,
const LogCB& log_cb);
~WebMClusterParser() override;
@@ -154,7 +170,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// Returns -1 if the parse fails.
// Returns 0 if more data is needed.
// Returns the number of bytes parsed on success.
- int Parse(const uint8* buf, int size);
+ int Parse(const uint8_t* buf, int size);
base::TimeDelta cluster_start_time() const { return cluster_start_time_; }
@@ -194,14 +210,24 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
WebMParserClient* OnListStart(int id) override;
bool OnListEnd(int id) override;
bool OnUInt(int id, int64 val) override;
- bool OnBinary(int id, const uint8* data, int size) override;
-
- bool ParseBlock(bool is_simple_block, const uint8* buf, int size,
- const uint8* additional, int additional_size, int duration,
+ bool OnBinary(int id, const uint8_t* data, int size) override;
+
+ bool ParseBlock(bool is_simple_block,
+ const uint8_t* buf,
+ int size,
+ const uint8_t* additional,
+ int additional_size,
+ int duration,
int64 discard_padding);
- bool OnBlock(bool is_simple_block, int track_num, int timecode, int duration,
- int flags, const uint8* data, int size,
- const uint8* additional, int additional_size,
+ bool OnBlock(bool is_simple_block,
+ int track_num,
+ int timecode,
+ int duration,
+ int flags,
+ const uint8_t* data,
+ int size,
+ const uint8_t* additional,
+ int additional_size,
int64 discard_padding);
// Resets the Track objects associated with each text track.
@@ -227,21 +253,44 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// if that track num is not a text track.
Track* FindTextTrack(int track_num);
+ // Attempts to read the duration from the encoded audio data, returning as
+ // TimeDelta or kNoTimestamp() if duration cannot be retrieved. This obviously
+ // violates layering rules, but is useful for MSE to know duration in cases
+ // where it isn't explicitly given and cannot be calculated for Blocks at the
+ // end of a Cluster (the next Cluster in playback-order may not be the next
+ // Cluster we parse, so we can't simply use the delta of the first Block in
+ // the next Cluster). Avoid calling if encrypted; may produce unexpected
+ // output. See implementation for supported codecs.
+ base::TimeDelta TryGetEncodedAudioDuration(const uint8_t* data, int size);
+
+ // Reads Opus packet header to determine packet duration. Duration returned
+ // as TimeDelta or kNoTimestamp() upon failure to read duration from packet.
+ base::TimeDelta ReadOpusDuration(const uint8_t* data, int size);
+
+ // Tracks the number of MEDIA_LOGs made in process of reading encoded
+ // duration. Useful to prevent log spam.
+ int num_duration_errors_;
+
double timecode_multiplier_; // Multiplier used to convert timecodes into
// microseconds.
std::set<int64> ignored_tracks_;
std::string audio_encryption_key_id_;
std::string video_encryption_key_id_;
+ const AudioCodec audio_codec_;
WebMListParser parser_;
int64 last_block_timecode_;
- scoped_ptr<uint8[]> block_data_;
+ scoped_ptr<uint8_t[]> block_data_;
int block_data_size_;
int64 block_duration_;
int64 block_add_id_;
- scoped_ptr<uint8[]> block_additional_data_;
+
+ scoped_ptr<uint8_t[]> block_additional_data_;
+ // Must be 0 if |block_additional_data_| is null. Must be > 0 if
+ // |block_additional_data_| is NOT null.
int block_additional_data_size_;
+
int64 discard_padding_;
bool discard_padding_set_;
diff --git a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
index 55dc791c2ce..4020df458bd 100644
--- a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
@@ -4,11 +4,14 @@
#include <algorithm>
#include <cstdlib>
+#include <vector>
#include "base/bind.h"
#include "base/logging.h"
+#include "media/base/audio_decoder_config.h"
#include "media/base/decrypt_config.h"
#include "media/formats/webm/cluster_builder.h"
+#include "media/formats/webm/opus_packet_builder.h"
#include "media/formats/webm/webm_cluster_parser.h"
#include "media/formats/webm/webm_constants.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -31,14 +34,16 @@ enum {
kTestVideoFrameDefaultDurationInMs = 17
};
-COMPILE_ASSERT(
+// Test duration defaults must differ from parser estimation defaults to know
+// which durations parser used when emitting buffers.
+static_assert(
static_cast<int>(kTestAudioFrameDefaultDurationInMs) !=
static_cast<int>(WebMClusterParser::kDefaultAudioBufferDurationInMs),
- test_default_is_same_as_estimation_fallback_audio_duration);
-COMPILE_ASSERT(
+ "test default is the same as estimation fallback audio duration");
+static_assert(
static_cast<int>(kTestVideoFrameDefaultDurationInMs) !=
static_cast<int>(WebMClusterParser::kDefaultVideoBufferDurationInMs),
- test_default_is_same_as_estimation_fallback_video_duration);
+ "test default is the same as estimation fallback video duration");
struct BlockInfo {
int track_num;
@@ -49,21 +54,27 @@ struct BlockInfo {
// this BlockGroup. The absolute value is used for parser verification.
// For simple blocks, this value must be non-negative, and is used only for
// parser verification.
- int duration;
+ double duration;
+
bool use_simple_block;
+
+ // Default data will be used if no data given.
+ const uint8_t* data;
+ int data_length;
};
static const BlockInfo kDefaultBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true }, // Assumes not using DefaultDuration
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 33, false },
- { kAudioTrackNum, 69, 23, false },
- { kVideoTrackNum, 100, 33, false },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, true, NULL, 0},
+ // Assumes not using DefaultDuration
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kAudioTrackNum, 46, 23, true, NULL, 0},
+ {kVideoTrackNum, 67, 33, false, NULL, 0},
+ {kAudioTrackNum, 69, 23, false, NULL, 0},
+ {kVideoTrackNum, 100, 33, false, NULL, 0},
};
-static const uint8 kEncryptedFrame[] = {
+static const uint8_t kEncryptedFrame[] = {
0x01, // Block is encrypted
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 // IV
};
@@ -74,27 +85,35 @@ static scoped_ptr<Cluster> CreateCluster(int timecode,
ClusterBuilder cb;
cb.SetClusterTimecode(0);
+ uint8_t kDefaultBlockData[] = { 0x00 };
+
for (int i = 0; i < block_count; i++) {
- uint8 data[] = { 0x00 };
+ const uint8_t* data;
+ int data_length;
+ if (block_info[i].data != NULL) {
+ data = block_info[i].data;
+ data_length = block_info[i].data_length;
+ } else {
+ data = kDefaultBlockData;
+ data_length = sizeof(kDefaultBlockData);
+ }
+
if (block_info[i].use_simple_block) {
CHECK_GE(block_info[i].duration, 0);
- cb.AddSimpleBlock(block_info[i].track_num,
- block_info[i].timestamp,
- 0, data, sizeof(data));
+ cb.AddSimpleBlock(block_info[i].track_num, block_info[i].timestamp, 0,
+ data, data_length);
continue;
}
if (block_info[i].duration < 0) {
cb.AddBlockGroupWithoutBlockDuration(block_info[i].track_num,
- block_info[i].timestamp,
- 0, data, sizeof(data));
+ block_info[i].timestamp, 0, data,
+ data_length);
continue;
}
- cb.AddBlockGroup(block_info[i].track_num,
- block_info[i].timestamp,
- block_info[i].duration,
- 0, data, sizeof(data));
+ cb.AddBlockGroup(block_info[i].track_num, block_info[i].timestamp,
+ block_info[i].duration, 0, data, data_length);
}
return cb.Finish();
@@ -161,7 +180,7 @@ static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
EXPECT_EQ(block_info[i].timestamp, buffer->timestamp().InMilliseconds());
EXPECT_EQ(std::abs(block_info[i].duration),
- buffer->duration().InMilliseconds());
+ buffer->duration().InMillisecondsF());
EXPECT_EQ(expected_type, buffer->type());
EXPECT_EQ(block_info[i].track_num, buffer->track_id());
}
@@ -212,7 +231,7 @@ static bool VerifyTextBuffers(
const scoped_refptr<StreamParserBuffer> buffer = *buffer_iter++;
EXPECT_EQ(block_info.timestamp, buffer->timestamp().InMilliseconds());
EXPECT_EQ(std::abs(block_info.duration),
- buffer->duration().InMilliseconds());
+ buffer->duration().InMillisecondsF());
EXPECT_EQ(DemuxerStream::TEXT, buffer->type());
EXPECT_EQ(text_track_num, buffer->track_id());
}
@@ -248,6 +267,7 @@ class WebMClusterParserTest : public testing::Test {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB())) {}
protected:
@@ -270,6 +290,7 @@ class WebMClusterParserTest : public testing::Test {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
}
@@ -305,18 +326,19 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kBlockInfo[] = {
- { kVideoTrackNum, 0, 33, true },
- { kAudioTrackNum, 0, 23, false },
- { kTextTrackNum, 10, 42, false },
- { kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true },
- { kVideoTrackNum, 33, 33, true },
- { kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true },
- { kVideoTrackNum, 66, 33, true },
- { kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true },
- { kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true },
+ {kVideoTrackNum, 0, 33, true, NULL, 0},
+ {kAudioTrackNum, 0, 23, false, NULL, 0},
+ {kTextTrackNum, 10, 42, false, NULL, 0},
+ {kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kVideoTrackNum, 33, 33, true, NULL, 0},
+ {kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kVideoTrackNum, 66, 33, true, NULL, 0},
+ {kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
};
const int kExpectedBuffersOnPartialCluster[] = {
@@ -407,7 +429,7 @@ TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
WebMClusterParser::BufferQueue video_buffers;
const WebMClusterParser::BufferQueue no_text_buffers;
- const uint8* data = cluster->data();
+ const uint8_t* data = cluster->data();
int size = cluster->size();
int default_parse_size = 3;
int parse_size = std::min(default_parse_size, size);
@@ -444,12 +466,12 @@ TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
// one of these scenarios.
TEST_F(WebMClusterParserTest, ParseBlockGroup) {
const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, 23, false },
- { kVideoTrackNum, 33, 34, false },
+ {kAudioTrackNum, 0, 23, false, NULL, 0},
+ {kVideoTrackNum, 33, 34, false, NULL, 0},
};
int block_count = arraysize(kBlockInfo);
- const uint8 kClusterData[] = {
+ const uint8_t kClusterData[] = {
0x1F, 0x43, 0xB6, 0x75, 0x9B, // Cluster(size=27)
0xE7, 0x81, 0x00, // Timecode(size=1, value=0)
// BlockGroup with BlockDuration before Block.
@@ -470,11 +492,11 @@ TEST_F(WebMClusterParserTest, ParseBlockGroup) {
TEST_F(WebMClusterParserTest, ParseSimpleBlockAndBlockGroupMixture) {
const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, false },
- { kVideoTrackNum, 33, 34, true },
- { kAudioTrackNum, 46, 23, false },
- { kVideoTrackNum, 67, 33, false },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, false, NULL, 0},
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kAudioTrackNum, 46, 23, false, NULL, 0},
+ {kVideoTrackNum, 67, 33, false, NULL, 0},
};
int block_count = arraysize(kBlockInfo);
scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
@@ -497,24 +519,25 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
ignored_tracks,
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true },
- { kTextTrackNum, 33, 99, true },
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 34, true },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, true, NULL, 0},
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kTextTrackNum, 33, 99, true, NULL, 0},
+ {kAudioTrackNum, 46, 23, true, NULL, 0},
+ {kVideoTrackNum, 67, 34, true, NULL, 0},
};
int input_block_count = arraysize(kInputBlockInfo);
const BlockInfo kOutputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true },
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 34, true },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, true, NULL, 0},
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kAudioTrackNum, 46, 23, true, NULL, 0},
+ {kVideoTrackNum, 67, 34, true, NULL, 0},
};
int output_block_count = arraysize(kOutputBlockInfo);
@@ -542,16 +565,17 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true },
- { kTextTrackNum, 33, 42, false },
- { kAudioTrackNum, 46, 23, true },
- { kTextTrackNum, 55, 44, false },
- { kVideoTrackNum, 67, 34, true },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, true, NULL, 0},
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kTextTrackNum, 33, 42, false, NULL, 0},
+ {kAudioTrackNum, 46, 23, true, NULL, 0},
+ {kTextTrackNum, 55, 44, false, NULL, 0},
+ {kVideoTrackNum, 67, 34, true, NULL, 0},
};
int input_block_count = arraysize(kInputBlockInfo);
@@ -579,6 +603,7 @@ TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kInputBlockInfo[] = {
@@ -616,17 +641,18 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true },
- { kSubtitleTextTrackNum, 33, 42, false },
- { kAudioTrackNum, 46, 23, true },
- { kCaptionTextTrackNum, 55, 44, false },
- { kVideoTrackNum, 67, 34, true },
- { kSubtitleTextTrackNum, 67, 33, false },
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 23, true, NULL, 0},
+ {kVideoTrackNum, 33, 34, true, NULL, 0},
+ {kSubtitleTextTrackNum, 33, 42, false, NULL, 0},
+ {kAudioTrackNum, 46, 23, true, NULL, 0},
+ {kCaptionTextTrackNum, 55, 44, false, NULL, 0},
+ {kVideoTrackNum, 67, 34, true, NULL, 0},
+ {kSubtitleTextTrackNum, 67, 33, false, NULL, 0},
};
int input_block_count = arraysize(kInputBlockInfo);
@@ -662,6 +688,7 @@ TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
std::set<int64>(),
std::string(),
"video_key_id",
+ kUnknownAudioCodec,
LogCB()));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
@@ -683,13 +710,14 @@ TEST_F(WebMClusterParserTest, ParseBadEncryptedBlock) {
std::set<int64>(),
std::string(),
"video_key_id",
+ kUnknownAudioCodec,
LogCB()));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(-1, result);
}
TEST_F(WebMClusterParserTest, ParseInvalidZeroSizedCluster) {
- const uint8 kBuffer[] = {
+ const uint8_t kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0x80, // CLUSTER (size = 0)
};
@@ -697,7 +725,7 @@ TEST_F(WebMClusterParserTest, ParseInvalidZeroSizedCluster) {
}
TEST_F(WebMClusterParserTest, ParseInvalidUnknownButActuallyZeroSizedCluster) {
- const uint8 kBuffer[] = {
+ const uint8_t kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = "unknown")
0x1F, 0x43, 0xB6, 0x75, 0x85, // CLUSTER (size = 5)
};
@@ -722,6 +750,7 @@ TEST_F(WebMClusterParserTest, ParseInvalidTextBlockGroupWithoutDuration) {
std::set<int64>(),
std::string(),
std::string(),
+ kUnknownAudioCodec,
LogCB()));
const BlockInfo kBlockInfo[] = {
@@ -741,13 +770,13 @@ TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);
const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, kTestAudioFrameDefaultDurationInMs, true },
- { kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true },
- { kVideoTrackNum, 33, kTestVideoFrameDefaultDurationInMs, true },
- { kAudioTrackNum, 46, kTestAudioFrameDefaultDurationInMs, true },
- { kVideoTrackNum, 67, kTestVideoFrameDefaultDurationInMs, true },
- { kAudioTrackNum, 69, kTestAudioFrameDefaultDurationInMs, true },
- { kVideoTrackNum, 100, kTestVideoFrameDefaultDurationInMs, true },
+ {kAudioTrackNum, 0, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kVideoTrackNum, 33, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
+ {kAudioTrackNum, 46, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kVideoTrackNum, 67, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
+ {kAudioTrackNum, 69, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
+ {kVideoTrackNum, 100, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
};
int block_count = arraysize(kBlockInfo);
@@ -774,17 +803,21 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
InSequence s;
// Absent DefaultDuration information, SimpleBlock durations are derived from
- // inter-buffer track timestamp delta if within the cluster, and are estimated
- // as the lowest non-zero duration seen so far if the last buffer in the track
- // in the cluster (independently for each track in the cluster).
+ // inter-buffer track timestamp delta if within the cluster. Duration for the
+ // last block in a cluster is estimated independently for each track in the
+ // cluster. For video tracks we use the maximum seen so far. For audio we use
+ // the the minimum.
+ // TODO(chcunningham): Move audio over to use the maximum.
const BlockInfo kBlockInfo1[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 22, true },
- { kVideoTrackNum, 33, 33, true },
- { kAudioTrackNum, 45, 23, true },
- { kVideoTrackNum, 66, 34, true },
- { kAudioTrackNum, 68, 22, true }, // Estimated from minimum audio dur
- { kVideoTrackNum, 100, 33, true }, // Estimated from minimum video dur
+ {kAudioTrackNum, 0, 23, true, NULL, 0},
+ {kAudioTrackNum, 23, 22, true, NULL, 0},
+ {kVideoTrackNum, 33, 33, true, NULL, 0},
+ {kAudioTrackNum, 45, 23, true, NULL, 0},
+ {kVideoTrackNum, 66, 34, true, NULL, 0},
+ // Estimated from minimum audio dur
+ {kAudioTrackNum, 68, 22, true, NULL, 0},
+ // Estimated from maximum video dur
+ {kVideoTrackNum, 100, 34, true, NULL, 0},
};
int block_count1 = arraysize(kBlockInfo1);
@@ -812,8 +845,10 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
// Verify that the estimated frame duration is tracked across clusters for
// each track.
const BlockInfo kBlockInfo2[] = {
- { kAudioTrackNum, 200, 22, true }, // Estimate carries over across clusters
- { kVideoTrackNum, 201, 33, true }, // Estimate carries over across clusters
+ // Estimate carries over across clusters
+ {kAudioTrackNum, 200, 22, true, NULL, 0},
+ // Estimate carries over across clusters
+ {kVideoTrackNum, 201, 34, true, NULL, 0},
};
int block_count2 = arraysize(kBlockInfo2);
@@ -828,17 +863,20 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
// Absent DefaultDuration and BlockDuration information, BlockGroup block
// durations are derived from inter-buffer track timestamp delta if within the
- // cluster, and are estimated as the lowest non-zero duration seen so far if
- // the last buffer in the track in the cluster (independently for each track
- // in the cluster).
- const BlockInfo kBlockInfo1[] = {
- { kAudioTrackNum, 0, -23, false },
- { kAudioTrackNum, 23, -22, false },
- { kVideoTrackNum, 33, -33, false },
- { kAudioTrackNum, 45, -23, false },
- { kVideoTrackNum, 66, -34, false },
- { kAudioTrackNum, 68, -22, false }, // Estimated from minimum audio dur
- { kVideoTrackNum, 100, -33, false }, // Estimated from minimum video dur
+ // cluster. Duration for the last block in a cluster is estimated
+ // independently for each track in the cluster. For video tracks we use the
+ // maximum seen so far. For audio we use the the minimum.
+ // TODO(chcunningham): Move audio over to use the maximum.
+ const BlockInfo kBlockInfo1[] = {
+ {kAudioTrackNum, 0, -23, false, NULL, 0},
+ {kAudioTrackNum, 23, -22, false, NULL, 0},
+ {kVideoTrackNum, 33, -33, false, NULL, 0},
+ {kAudioTrackNum, 45, -23, false, NULL, 0},
+ {kVideoTrackNum, 66, -34, false, NULL, 0},
+ // Estimated from minimum audio dur
+ {kAudioTrackNum, 68, -22, false, NULL, 0},
+ // Estimated from maximum video dur
+ {kVideoTrackNum, 100, -34, false, NULL, 0},
};
int block_count1 = arraysize(kBlockInfo1);
@@ -866,8 +904,8 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
// Verify that the estimated frame duration is tracked across clusters for
// each track.
const BlockInfo kBlockInfo2[] = {
- { kAudioTrackNum, 200, -22, false },
- { kVideoTrackNum, 201, -33, false },
+ {kAudioTrackNum, 200, -22, false, NULL, 0},
+ {kVideoTrackNum, 201, -34, false, NULL, 0},
};
int block_count2 = arraysize(kBlockInfo2);
@@ -887,13 +925,18 @@ TEST_F(WebMClusterParserTest,
EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);
const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, -kTestAudioFrameDefaultDurationInMs, false },
- { kAudioTrackNum, 23, -kTestAudioFrameDefaultDurationInMs, false },
- { kVideoTrackNum, 33, -kTestVideoFrameDefaultDurationInMs, false },
- { kAudioTrackNum, 46, -kTestAudioFrameDefaultDurationInMs, false },
- { kVideoTrackNum, 67, -kTestVideoFrameDefaultDurationInMs, false },
- { kAudioTrackNum, 69, -kTestAudioFrameDefaultDurationInMs, false },
- { kVideoTrackNum, 100, -kTestVideoFrameDefaultDurationInMs, false },
+ {kAudioTrackNum, 0, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
+ {kAudioTrackNum, 23, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
+ {kVideoTrackNum, 33, -kTestVideoFrameDefaultDurationInMs, false, NULL, 0},
+ {kAudioTrackNum, 46, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
+ {kVideoTrackNum, 67, -kTestVideoFrameDefaultDurationInMs, false, NULL, 0},
+ {kAudioTrackNum, 69, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
+ {kVideoTrackNum,
+ 100,
+ -kTestVideoFrameDefaultDurationInMs,
+ false,
+ NULL,
+ 0},
};
int block_count = arraysize(kBlockInfo);
@@ -954,4 +997,98 @@ TEST_F(WebMClusterParserTest,
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
}
+TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
+ // Reset parser to expect Opus codec audio.
+ parser_.reset(new WebMClusterParser(
+ kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
+ kNoTimestamp(), TextTracks(), std::set<int64>(), std::string(),
+ std::string(), kCodecOpus, LogCB()));
+
+ int loop_count = 0;
+ for (const auto* packet_ptr : BuildAllOpusPackets()) {
+ const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
+ 0,
+ packet_ptr->duration_ms(),
+ true, // Make it a SimpleBlock.
+ packet_ptr->data(),
+ packet_ptr->size()}};
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+ loop_count++;
+ }
+
+ // Test should minimally cover all the combinations of config and frame count.
+ ASSERT_GE(loop_count, kNumPossibleOpusConfigs * kMaxOpusPacketFrameCount);
+}
+
+TEST_F(WebMClusterParserTest, PreferOpusDurationsOverBlockDurations) {
+ // Reset parser to expect Opus codec audio.
+ parser_.reset(new WebMClusterParser(
+ kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
+ kNoTimestamp(), TextTracks(), std::set<int64>(), std::string(),
+ std::string(), kCodecOpus, LogCB()));
+
+ int loop_count = 0;
+ for (const auto* packet_ptr : BuildAllOpusPackets()) {
+ // Setting BlockDuration != Opus duration to see which one the parser uses.
+ int block_duration_ms = packet_ptr->duration_ms() + 10;
+
+ BlockInfo block_infos[] = {{kAudioTrackNum,
+ 0,
+ block_duration_ms,
+ false, // Not a SimpleBlock.
+ packet_ptr->data(),
+ packet_ptr->size()}};
+
+ int block_count = arraysize(block_infos);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, block_infos, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+
+ // BlockInfo duration will be used to verify buffer duration, so changing
+ // duration to be that of the Opus packet to verify it was preferred.
+ block_infos[0].duration = packet_ptr->duration_ms();
+
+ ASSERT_TRUE(VerifyBuffers(parser_, block_infos, block_count));
+ loop_count++;
+ }
+
+ // Test should minimally cover all the combinations of config and frame count.
+ ASSERT_GE(loop_count, kNumPossibleOpusConfigs * kMaxOpusPacketFrameCount);
+}
+
+// Tests that BlockDuration is used to set duration on buffer rather than
+// encoded duration in Opus packet (or hard coded duration estimates). Encoded
+// Opus duration is usually preferred but cannot be known when encrypted.
+TEST_F(WebMClusterParserTest, DontReadEncodedDurationWhenEncrypted) {
+ // Non-empty dummy value signals encryption is active for audio.
+ std::string audio_encryption_id("audio_key_id");
+
+ // Reset parser to expect Opus codec audio and use audio encryption key id.
+ parser_.reset(new WebMClusterParser(
+ kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
+ kNoTimestamp(), TextTracks(), std::set<int64>(), audio_encryption_id,
+ std::string(), kCodecOpus, LogCB()));
+
+ // Single Block with BlockDuration and encrypted data.
+ const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
+ 0,
+ kTestAudioFrameDefaultDurationInMs,
+ false, // Not a SimpleBlock
+ kEncryptedFrame, // Encrypted frame data
+ arraysize(kEncryptedFrame)}};
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+
+ // Will verify that duration of buffer matches that of BlockDuration.
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
} // namespace media
diff --git a/chromium/media/formats/webm/webm_content_encodings_client.cc b/chromium/media/formats/webm/webm_content_encodings_client.cc
index f2294de7024..7265df94f6e 100644
--- a/chromium/media/formats/webm/webm_content_encodings_client.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client.cc
@@ -44,7 +44,7 @@ WebMParserClient* WebMContentEncodingsClient::OnListStart(int id) {
if (id == kWebMIdContentEncryption) {
DCHECK(cur_content_encoding_.get());
if (content_encryption_encountered_) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncryption.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncryption.";
return NULL;
}
content_encryption_encountered_ = true;
@@ -67,7 +67,7 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
if (id == kWebMIdContentEncodings) {
// ContentEncoding element is mandatory. Check this!
if (content_encodings_.empty()) {
- MEDIA_LOG(log_cb_) << "Missing ContentEncoding.";
+ MEDIA_LOG(ERROR, log_cb_) << "Missing ContentEncoding.";
return false;
}
content_encodings_ready_ = true;
@@ -85,7 +85,7 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
// Default value of encoding order is 0, which should only be used on the
// first ContentEncoding.
if (!content_encodings_.empty()) {
- MEDIA_LOG(log_cb_) << "Missing ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, log_cb_) << "Missing ContentEncodingOrder.";
return false;
}
cur_content_encoding_->set_order(0);
@@ -99,15 +99,15 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
// Check for elements valid in spec but not supported for now.
if (cur_content_encoding_->type() == ContentEncoding::kTypeCompression) {
- MEDIA_LOG(log_cb_) << "ContentCompression not supported.";
+ MEDIA_LOG(ERROR, log_cb_) << "ContentCompression not supported.";
return false;
}
// Enforce mandatory elements without default values.
DCHECK(cur_content_encoding_->type() == ContentEncoding::kTypeEncryption);
if (!content_encryption_encountered_) {
- MEDIA_LOG(log_cb_) << "ContentEncodingType is encryption but"
- << " ContentEncryption is missing.";
+ MEDIA_LOG(ERROR, log_cb_) << "ContentEncodingType is encryption but"
+ << " ContentEncryption is missing.";
return false;
}
@@ -146,13 +146,13 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingOrder) {
if (cur_content_encoding_->order() != ContentEncoding::kOrderInvalid) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingOrder.";
return false;
}
if (val != static_cast<int64>(content_encodings_.size())) {
// According to the spec, encoding order starts with 0 and counts upwards.
- MEDIA_LOG(log_cb_) << "Unexpected ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingOrder.";
return false;
}
@@ -162,18 +162,19 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingScope) {
if (cur_content_encoding_->scope() != ContentEncoding::kScopeInvalid) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncodingScope.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingScope.";
return false;
}
if (val == ContentEncoding::kScopeInvalid ||
val > ContentEncoding::kScopeMax) {
- MEDIA_LOG(log_cb_) << "Unexpected ContentEncodingScope.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingScope.";
return false;
}
if (val & ContentEncoding::kScopeNextContentEncodingData) {
- MEDIA_LOG(log_cb_) << "Encoded next ContentEncoding is not supported.";
+ MEDIA_LOG(ERROR, log_cb_) << "Encoded next ContentEncoding is not "
+ "supported.";
return false;
}
@@ -183,17 +184,18 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingType) {
if (cur_content_encoding_->type() != ContentEncoding::kTypeInvalid) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncodingType.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingType.";
return false;
}
if (val == ContentEncoding::kTypeCompression) {
- MEDIA_LOG(log_cb_) << "ContentCompression not supported.";
+ MEDIA_LOG(ERROR, log_cb_) << "ContentCompression not supported.";
return false;
}
if (val != ContentEncoding::kTypeEncryption) {
- MEDIA_LOG(log_cb_) << "Unexpected ContentEncodingType " << val << ".";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingType " << val
+ << ".";
return false;
}
@@ -204,13 +206,13 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncAlgo) {
if (cur_content_encoding_->encryption_algo() !=
ContentEncoding::kEncAlgoInvalid) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncAlgo.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncAlgo.";
return false;
}
if (val < ContentEncoding::kEncAlgoNotEncrypted ||
val > ContentEncoding::kEncAlgoAes) {
- MEDIA_LOG(log_cb_) << "Unexpected ContentEncAlgo " << val << ".";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncAlgo " << val << ".";
return false;
}
@@ -222,12 +224,13 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdAESSettingsCipherMode) {
if (cur_content_encoding_->cipher_mode() !=
ContentEncoding::kCipherModeInvalid) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple AESSettingsCipherMode.";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple AESSettingsCipherMode.";
return false;
}
if (val != ContentEncoding::kCipherModeCtr) {
- MEDIA_LOG(log_cb_) << "Unexpected AESSettingsCipherMode " << val << ".";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected AESSettingsCipherMode " << val
+ << ".";
return false;
}
@@ -250,7 +253,7 @@ bool WebMContentEncodingsClient::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdContentEncKeyID) {
if (!cur_content_encoding_->encryption_key_id().empty()) {
- MEDIA_LOG(log_cb_) << "Unexpected multiple ContentEncKeyID";
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncKeyID";
return false;
}
cur_content_encoding_->SetEncryptionKeyId(data, size);
diff --git a/chromium/media/formats/webm/webm_crypto_helpers.h b/chromium/media/formats/webm/webm_crypto_helpers.h
index f7703ea9a8a..41ad5b13edf 100644
--- a/chromium/media/formats/webm/webm_crypto_helpers.h
+++ b/chromium/media/formats/webm/webm_crypto_helpers.h
@@ -11,8 +11,6 @@
namespace media {
-const char kWebMInitDataType[] = "webm";
-
// Fills an initialized DecryptConfig, which can be sent to the Decryptor if
// the stream has potentially encrypted frames. Also sets |data_offset| which
// indicates where the encrypted data starts. Leaving the IV empty will tell
diff --git a/chromium/media/formats/webm/webm_stream_parser.cc b/chromium/media/formats/webm/webm_stream_parser.cc
index 384c9cfdf48..95a4eb01c74 100644
--- a/chromium/media/formats/webm/webm_stream_parser.cc
+++ b/chromium/media/formats/webm/webm_stream_parser.cc
@@ -12,7 +12,6 @@
#include "media/formats/webm/webm_cluster_parser.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_content_encodings.h"
-#include "media/formats/webm/webm_crypto_helpers.h"
#include "media/formats/webm/webm_info_parser.h"
#include "media/formats/webm/webm_tracks_parser.h"
@@ -26,20 +25,21 @@ WebMStreamParser::WebMStreamParser()
WebMStreamParser::~WebMStreamParser() {
}
-void WebMStreamParser::Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+void WebMStreamParser::Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
DCHECK_EQ(state_, kWaitingForInit);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
- DCHECK(!need_key_cb.is_null());
+ DCHECK(!encrypted_media_init_data_cb.is_null());
DCHECK(!new_segment_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
@@ -48,7 +48,7 @@ void WebMStreamParser::Init(const InitCB& init_cb,
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
ignore_text_tracks_ = ignore_text_tracks;
- need_key_cb_ = need_key_cb;
+ encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
@@ -154,7 +154,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
break;
case kWebMIdCluster:
if (!cluster_parser_) {
- MEDIA_LOG(log_cb_) << "Found Cluster element before Info.";
+ MEDIA_LOG(ERROR, log_cb_) << "Found Cluster element before Info.";
return -1;
}
ChangeState(kParsingClusters);
@@ -172,7 +172,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
// We've found the element we are looking for.
break;
default: {
- MEDIA_LOG(log_cb_) << "Unexpected element ID 0x" << std::hex << id;
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected element ID 0x" << std::hex << id;
return -1;
}
}
@@ -207,20 +207,20 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
if (unknown_segment_size_ && (info_parser.duration() <= 0) &&
!info_parser.date_utc().is_null()) {
- params.liveness = Demuxer::LIVENESS_LIVE;
+ params.liveness = DemuxerStream::LIVENESS_LIVE;
} else if (info_parser.duration() >= 0) {
- params.liveness = Demuxer::LIVENESS_RECORDED;
+ params.liveness = DemuxerStream::LIVENESS_RECORDED;
} else {
- params.liveness = Demuxer::LIVENESS_UNKNOWN;
+ params.liveness = DemuxerStream::LIVENESS_UNKNOWN;
}
const AudioDecoderConfig& audio_config = tracks_parser.audio_decoder_config();
if (audio_config.is_encrypted())
- FireNeedKey(tracks_parser.audio_encryption_key_id());
+ OnEncryptedMediaInitData(tracks_parser.audio_encryption_key_id());
const VideoDecoderConfig& video_config = tracks_parser.video_decoder_config();
if (video_config.is_encrypted())
- FireNeedKey(tracks_parser.video_encryption_key_id());
+ OnEncryptedMediaInitData(tracks_parser.video_encryption_key_id());
if (!config_cb_.Run(audio_config,
video_config,
@@ -239,10 +239,11 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
tracks_parser.video_encryption_key_id(),
+ audio_config.codec(),
log_cb_));
if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(true, params);
+ base::ResetAndReturn(&init_cb_).Run(params);
return bytes_parsed;
}
@@ -275,9 +276,9 @@ int WebMStreamParser::ParseCluster(const uint8* data, int size) {
return bytes_parsed;
}
-void WebMStreamParser::FireNeedKey(const std::string& key_id) {
+void WebMStreamParser::OnEncryptedMediaInitData(const std::string& key_id) {
std::vector<uint8> key_id_vector(key_id.begin(), key_id.end());
- need_key_cb_.Run(kWebMInitDataType, key_id_vector);
+ encrypted_media_init_data_cb_.Run(EmeInitDataType::WEBM, key_id_vector);
}
} // namespace media
diff --git a/chromium/media/formats/webm/webm_stream_parser.h b/chromium/media/formats/webm/webm_stream_parser.h
index deefa73cca5..e808413c712 100644
--- a/chromium/media/formats/webm/webm_stream_parser.h
+++ b/chromium/media/formats/webm/webm_stream_parser.h
@@ -27,7 +27,7 @@ class WebMStreamParser : public StreamParser {
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool ignore_text_tracks,
- const NeedKeyCB& need_key_cb,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) override;
@@ -63,15 +63,15 @@ class WebMStreamParser : public StreamParser {
// Returning > 0 indicates success & the number of bytes parsed.
int ParseCluster(const uint8* data, int size);
- // Fire needkey event through the |need_key_cb_|.
- void FireNeedKey(const std::string& key_id);
+ // Fire needkey event through the |encrypted_media_init_data_cb_|.
+ void OnEncryptedMediaInitData(const std::string& key_id);
State state_;
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
bool ignore_text_tracks_;
- NeedKeyCB need_key_cb_;
+ EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
diff --git a/chromium/media/formats/webm/webm_tracks_parser.cc b/chromium/media/formats/webm/webm_tracks_parser.cc
index 92f044110ed..86bb0a73103 100644
--- a/chromium/media/formats/webm/webm_tracks_parser.cc
+++ b/chromium/media/formats/webm/webm_tracks_parser.cc
@@ -136,9 +136,9 @@ bool WebMTracksParser::OnListEnd(int id) {
if (id == kWebMIdTrackEntry) {
if (track_type_ == -1 || track_num_ == -1) {
- MEDIA_LOG(log_cb_) << "Missing TrackEntry data for "
- << " TrackType " << track_type_
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry data for "
+ << " TrackType " << track_type_ << " TrackNum "
+ << track_num_;
return false;
}
@@ -146,7 +146,7 @@ bool WebMTracksParser::OnListEnd(int id) {
track_type_ != kWebMTrackTypeVideo &&
track_type_ != kWebMTrackTypeSubtitlesOrCaptions &&
track_type_ != kWebMTrackTypeDescriptionsOrMetadata) {
- MEDIA_LOG(log_cb_) << "Unexpected TrackType " << track_type_;
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected TrackType " << track_type_;
return false;
}
@@ -154,29 +154,29 @@ bool WebMTracksParser::OnListEnd(int id) {
if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
- MEDIA_LOG(log_cb_) << "Missing TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextSubtitles &&
text_track_kind != kTextCaptions) {
- MEDIA_LOG(log_cb_) << "Wrong TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, log_cb_) << "Wrong TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
} else if (track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
- MEDIA_LOG(log_cb_) << "Missing TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextDescriptions &&
text_track_kind != kTextMetadata) {
- MEDIA_LOG(log_cb_) << "Wrong TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, log_cb_) << "Wrong TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
}
@@ -196,7 +196,8 @@ bool WebMTracksParser::OnListEnd(int id) {
audio_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
- MEDIA_LOG(log_cb_) << "Illegal 0ns audio TrackEntry DefaultDuration";
+ MEDIA_LOG(ERROR, log_cb_) << "Illegal 0ns audio TrackEntry "
+ "DefaultDuration";
return false;
}
audio_default_duration_ = default_duration_;
@@ -208,7 +209,7 @@ bool WebMTracksParser::OnListEnd(int id) {
return false;
}
} else {
- MEDIA_LOG(log_cb_) << "Ignoring audio track " << track_num_;
+ MEDIA_LOG(DEBUG, log_cb_) << "Ignoring audio track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeVideo) {
@@ -217,7 +218,8 @@ bool WebMTracksParser::OnListEnd(int id) {
video_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
- MEDIA_LOG(log_cb_) << "Illegal 0ns video TrackEntry DefaultDuration";
+ MEDIA_LOG(ERROR, log_cb_) << "Illegal 0ns video TrackEntry "
+ "DefaultDuration";
return false;
}
video_default_duration_ = default_duration_;
@@ -229,13 +231,13 @@ bool WebMTracksParser::OnListEnd(int id) {
return false;
}
} else {
- MEDIA_LOG(log_cb_) << "Ignoring video track " << track_num_;
+ MEDIA_LOG(DEBUG, log_cb_) << "Ignoring video track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions ||
track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
if (ignore_text_tracks_) {
- MEDIA_LOG(log_cb_) << "Ignoring text track " << track_num_;
+ MEDIA_LOG(DEBUG, log_cb_) << "Ignoring text track " << track_num_;
ignored_tracks_.insert(track_num_);
} else {
std::string track_num = base::Int64ToString(track_num_);
@@ -243,7 +245,7 @@ bool WebMTracksParser::OnListEnd(int id) {
text_track_kind, track_name_, track_language_, track_num);
}
} else {
- MEDIA_LOG(log_cb_) << "Unexpected TrackType " << track_type_;
+ MEDIA_LOG(ERROR, log_cb_) << "Unexpected TrackType " << track_type_;
return false;
}
@@ -288,8 +290,8 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
}
if (*dst != -1) {
- MEDIA_LOG(log_cb_) << "Multiple values for id " << std::hex << id
- << " specified";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
+ << " specified";
return false;
}
@@ -304,7 +306,7 @@ bool WebMTracksParser::OnFloat(int id, double val) {
bool WebMTracksParser::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdCodecPrivate) {
if (!codec_private_.empty()) {
- MEDIA_LOG(log_cb_) << "Multiple CodecPrivate fields in a track.";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple CodecPrivate fields in a track.";
return false;
}
codec_private_.assign(data, data + size);
@@ -316,7 +318,7 @@ bool WebMTracksParser::OnBinary(int id, const uint8* data, int size) {
bool WebMTracksParser::OnString(int id, const std::string& str) {
if (id == kWebMIdCodecID) {
if (!codec_id_.empty()) {
- MEDIA_LOG(log_cb_) << "Multiple CodecID fields in a track";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple CodecID fields in a track";
return false;
}
diff --git a/chromium/media/formats/webm/webm_video_client.cc b/chromium/media/formats/webm/webm_video_client.cc
index 592475f092c..9de87e23410 100644
--- a/chromium/media/formats/webm/webm_video_client.cc
+++ b/chromium/media/formats/webm/webm_video_client.cc
@@ -44,7 +44,7 @@ bool WebMVideoClient::InitializeConfig(
video_codec = kCodecVP9;
profile = VP9PROFILE_ANY;
} else {
- MEDIA_LOG(log_cb_) << "Unsupported video codec_id " << codec_id;
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported video codec_id " << codec_id;
return false;
}
@@ -83,7 +83,8 @@ bool WebMVideoClient::InitializeConfig(
if (display_width_ <= 0 || display_height_ <= 0)
return false;
} else {
- MEDIA_LOG(log_cb_) << "Unsupported display unit type " << display_unit_;
+ MEDIA_LOG(ERROR, log_cb_) << "Unsupported display unit type "
+ << display_unit_;
return false;
}
gfx::Size natural_size = gfx::Size(display_width_, display_height_);
@@ -139,8 +140,9 @@ bool WebMVideoClient::OnUInt(int id, int64 val) {
}
if (*dst != -1) {
- MEDIA_LOG(log_cb_) << "Multiple values for id " << std::hex << id
- << " specified (" << *dst << " and " << val << ")";
+ MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
+ << " specified (" << *dst << " and " << val
+ << ")";
return false;
}
diff --git a/chromium/media/media.gyp b/chromium/media/media.gyp
index 8b2606a2d04..ed6aa6698b6 100644
--- a/chromium/media/media.gyp
+++ b/chromium/media/media.gyp
@@ -40,6 +40,12 @@
}, {
'pkg-config': 'pkg-config'
}],
+ # low memory buffer is used in non-Android based chromecast build due to hardware limitation.
+ ['chromecast==1 and OS!="android"', {
+ 'use_low_memory_buffer%': 1,
+ }, {
+ 'use_low_memory_buffer%': 0,
+ }],
],
},
'includes': [
@@ -134,16 +140,16 @@
'audio/cras/cras_input.h',
'audio/cras/cras_unified.cc',
'audio/cras/cras_unified.h',
- 'audio/fake_audio_consumer.cc',
- 'audio/fake_audio_consumer.h',
'audio/fake_audio_input_stream.cc',
'audio/fake_audio_input_stream.h',
- 'audio/fake_audio_log_factory.h',
'audio/fake_audio_log_factory.cc',
+ 'audio/fake_audio_log_factory.h',
'audio/fake_audio_manager.cc',
'audio/fake_audio_manager.h',
'audio/fake_audio_output_stream.cc',
'audio/fake_audio_output_stream.h',
+ 'audio/fake_audio_worker.cc',
+ 'audio/fake_audio_worker.h',
'audio/linux/audio_manager_linux.cc',
'audio/mac/audio_auhal_mac.cc',
'audio/mac/audio_auhal_mac.h',
@@ -205,10 +211,10 @@
'base/audio_block_fifo.h',
'base/audio_buffer.cc',
'base/audio_buffer.h',
- 'base/audio_buffer_queue.cc',
- 'base/audio_buffer_queue.h',
'base/audio_buffer_converter.cc',
'base/audio_buffer_converter.h',
+ 'base/audio_buffer_queue.cc',
+ 'base/audio_buffer_queue.h',
'base/audio_capturer_source.h',
'base/audio_converter.cc',
'base/audio_converter.h',
@@ -233,6 +239,8 @@
'base/audio_renderer_mixer_input.cc',
'base/audio_renderer_mixer_input.h',
'base/audio_renderer_sink.h',
+ 'base/audio_shifter.cc',
+ 'base/audio_shifter.h',
'base/audio_splicer.cc',
'base/audio_splicer.h',
'base/audio_timestamp_helper.cc',
@@ -251,10 +259,16 @@
'base/byte_queue.h',
'base/cdm_callback_promise.cc',
'base/cdm_callback_promise.h',
+ 'base/cdm_context.cc',
+ 'base/cdm_context.h',
'base/cdm_factory.cc',
'base/cdm_factory.h',
+ 'base/cdm_key_information.cc',
+ 'base/cdm_key_information.h',
'base/cdm_promise.cc',
'base/cdm_promise.h',
+ 'base/cdm_promise_adapter.cc',
+ 'base/cdm_promise_adapter.h',
'base/channel_mixer.cc',
'base/channel_mixer.h',
'base/channel_mixing_matrix.cc',
@@ -284,6 +298,10 @@
'base/eme_constants.h',
'base/key_system_info.cc',
'base/key_system_info.h',
+ 'base/key_systems.cc',
+ 'base/key_systems.h',
+ 'base/key_systems_support_uma.cc',
+ 'base/key_systems_support_uma.h',
'base/keyboard_event_counter.cc',
'base/keyboard_event_counter.h',
'base/mac/avfoundation_glue.h',
@@ -291,10 +309,14 @@
'base/mac/coremedia_glue.h',
'base/mac/coremedia_glue.mm',
'base/mac/corevideo_glue.h',
+ 'base/mac/video_frame_mac.cc',
+ 'base/mac/video_frame_mac.h',
'base/mac/videotoolbox_glue.h',
'base/mac/videotoolbox_glue.mm',
'base/media.cc',
'base/media.h',
+ 'base/media_client.cc',
+ 'base/media_client.h',
'base/media_file_checker.cc',
'base/media_file_checker.h',
'base/media_keys.cc',
@@ -302,10 +324,14 @@
'base/media_log.cc',
'base/media_log.h',
'base/media_log_event.h',
+ 'base/media_permission.cc',
+ 'base/media_permission.h',
'base/media_posix.cc',
'base/media_switches.cc',
'base/media_switches.h',
'base/media_win.cc',
+ 'base/moving_average.cc',
+ 'base/moving_average.h',
'base/multi_channel_resampler.cc',
'base/multi_channel_resampler.h',
'base/pipeline.cc',
@@ -317,9 +343,10 @@
'base/ranges.h',
'base/renderer.cc',
'base/renderer.h',
+ 'base/renderer_factory.cc',
+ 'base/renderer_factory.h',
'base/sample_format.cc',
'base/sample_format.h',
- 'base/scoped_histogram_timer.h',
'base/seekable_buffer.cc',
'base/seekable_buffer.h',
'base/serial_runner.cc',
@@ -330,8 +357,6 @@
'base/simd/convert_yuv_to_rgb_c.cc',
'base/simd/filter_yuv.h',
'base/simd/filter_yuv_c.cc',
- 'base/simd/yuv_to_rgb_table.cc',
- 'base/simd/yuv_to_rgb_table.h',
'base/sinc_resampler.cc',
'base/sinc_resampler.h',
'base/stream_parser.cc',
@@ -355,12 +380,18 @@
'base/user_input_monitor_linux.cc',
'base/user_input_monitor_mac.cc',
'base/user_input_monitor_win.cc',
+ 'base/video_capture_types.cc',
+ 'base/video_capture_types.h',
+ 'base/video_capturer_source.cc',
+ 'base/video_capturer_source.h',
'base/video_decoder.cc',
'base/video_decoder.h',
'base/video_decoder_config.cc',
'base/video_decoder_config.h',
'base/video_frame.cc',
'base/video_frame.h',
+ 'base/video_frame_metadata.cc',
+ 'base/video_frame_metadata.h',
'base/video_frame_pool.cc',
'base/video_frame_pool.h',
'base/video_renderer.cc',
@@ -368,18 +399,26 @@
'base/video_rotation.h',
'base/video_util.cc',
'base/video_util.h',
- 'base/yuv_convert.cc',
- 'base/yuv_convert.h',
'base/wall_clock_time_source.cc',
'base/wall_clock_time_source.h',
+ 'base/yuv_convert.cc',
+ 'base/yuv_convert.h',
+ 'blink/skcanvas_video_renderer.cc',
+ 'blink/skcanvas_video_renderer.h',
'cdm/aes_decryptor.cc',
'cdm/aes_decryptor.h',
+ 'cdm/cenc_utils.cc',
+ 'cdm/cenc_utils.h',
+ 'cdm/default_cdm_factory.cc',
+ 'cdm/default_cdm_factory.h',
'cdm/json_web_key.cc',
'cdm/json_web_key.h',
'cdm/key_system_names.cc',
'cdm/key_system_names.h',
'cdm/player_tracker_impl.cc',
'cdm/player_tracker_impl.h',
+ 'cdm/proxy_decryptor.cc',
+ 'cdm/proxy_decryptor.h',
'ffmpeg/ffmpeg_common.cc',
'ffmpeg/ffmpeg_common.h',
'ffmpeg/ffmpeg_deleters.h',
@@ -389,12 +428,11 @@
'filters/audio_file_reader.h',
'filters/audio_renderer_algorithm.cc',
'filters/audio_renderer_algorithm.h',
- 'filters/audio_renderer_impl.cc',
- 'filters/audio_renderer_impl.h',
'filters/blocking_url_protocol.cc',
'filters/blocking_url_protocol.h',
'filters/chunk_demuxer.cc',
'filters/chunk_demuxer.h',
+ 'filters/context_3d.h',
'filters/decoder_selector.cc',
'filters/decoder_selector.h',
'filters/decoder_stream.cc',
@@ -407,6 +445,8 @@
'filters/decrypting_demuxer_stream.h',
'filters/decrypting_video_decoder.cc',
'filters/decrypting_video_decoder.h',
+ 'filters/default_media_permission.cc',
+ 'filters/default_media_permission.h',
'filters/ffmpeg_audio_decoder.cc',
'filters/ffmpeg_audio_decoder.h',
'filters/ffmpeg_bitstream_converter.h',
@@ -420,8 +460,6 @@
'filters/file_data_source.h',
'filters/frame_processor.cc',
'filters/frame_processor.h',
- 'filters/gpu_video_accelerator_factories.cc',
- 'filters/gpu_video_accelerator_factories.h',
'filters/gpu_video_decoder.cc',
'filters/gpu_video_decoder.h',
'filters/h264_bit_reader.cc',
@@ -430,81 +468,89 @@
'filters/h264_parser.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
+ 'filters/jpeg_parser.cc',
+ 'filters/jpeg_parser.h',
'filters/opus_audio_decoder.cc',
'filters/opus_audio_decoder.h',
- 'filters/renderer_impl.cc',
- 'filters/renderer_impl.h',
- 'filters/skcanvas_video_renderer.cc',
- 'filters/skcanvas_video_renderer.h',
- 'filters/source_buffer_platform.cc',
- 'filters/source_buffer_platform.h',
'filters/source_buffer_range.cc',
'filters/source_buffer_range.h',
'filters/source_buffer_stream.cc',
'filters/source_buffer_stream.h',
'filters/stream_parser_factory.cc',
'filters/stream_parser_factory.h',
- 'filters/video_frame_scheduler.h',
- 'filters/video_frame_scheduler_impl.cc',
- 'filters/video_frame_scheduler_impl.h',
- 'filters/video_frame_scheduler_proxy.cc',
- 'filters/video_frame_scheduler_proxy.h',
- 'filters/video_renderer_impl.cc',
- 'filters/video_renderer_impl.h',
+ 'filters/video_cadence_estimator.cc',
+ 'filters/video_cadence_estimator.h',
+ 'filters/video_renderer_algorithm.cc',
+ 'filters/video_renderer_algorithm.h',
+ 'filters/vp8_bool_decoder.cc',
+ 'filters/vp8_bool_decoder.h',
+ 'filters/vp8_parser.cc',
+ 'filters/vp8_parser.h',
'filters/vpx_video_decoder.cc',
'filters/vpx_video_decoder.h',
'filters/webvtt_util.h',
'filters/wsola_internals.cc',
'filters/wsola_internals.h',
- 'midi/midi_manager.cc',
- 'midi/midi_manager.h',
- 'midi/midi_manager_alsa.cc',
- 'midi/midi_manager_alsa.h',
- 'midi/midi_manager_android.cc',
- 'midi/midi_manager_mac.cc',
- 'midi/midi_manager_mac.h',
- 'midi/midi_manager_usb.cc',
- 'midi/midi_manager_usb.h',
- 'midi/midi_manager_win.cc',
- 'midi/midi_manager_win.h',
- 'midi/midi_message_queue.cc',
- 'midi/midi_message_queue.h',
- 'midi/midi_message_util.cc',
- 'midi/midi_message_util.h',
- 'midi/midi_port_info.cc',
- 'midi/midi_port_info.h',
- 'midi/usb_midi_descriptor_parser.cc',
- 'midi/usb_midi_descriptor_parser.h',
- 'midi/usb_midi_device.h',
- 'midi/usb_midi_device_android.cc',
- 'midi/usb_midi_device_android.h',
- 'midi/usb_midi_device_factory_android.cc',
- 'midi/usb_midi_device_factory_android.h',
- 'midi/usb_midi_input_stream.cc',
- 'midi/usb_midi_input_stream.h',
- 'midi/usb_midi_jack.h',
- 'midi/usb_midi_output_stream.cc',
- 'midi/usb_midi_output_stream.h',
+ 'formats/common/offset_byte_queue.cc',
+ 'formats/common/offset_byte_queue.h',
+ 'formats/webm/webm_audio_client.cc',
+ 'formats/webm/webm_audio_client.h',
+ 'formats/webm/webm_cluster_parser.cc',
+ 'formats/webm/webm_cluster_parser.h',
+ 'formats/webm/webm_constants.cc',
+ 'formats/webm/webm_constants.h',
+ 'formats/webm/webm_content_encodings.cc',
+ 'formats/webm/webm_content_encodings.h',
+ 'formats/webm/webm_content_encodings_client.cc',
+ 'formats/webm/webm_content_encodings_client.h',
+ 'formats/webm/webm_crypto_helpers.cc',
+ 'formats/webm/webm_crypto_helpers.h',
+ 'formats/webm/webm_info_parser.cc',
+ 'formats/webm/webm_info_parser.h',
+ 'formats/webm/webm_parser.cc',
+ 'formats/webm/webm_parser.h',
+ 'formats/webm/webm_stream_parser.cc',
+ 'formats/webm/webm_stream_parser.h',
+ 'formats/webm/webm_tracks_parser.cc',
+ 'formats/webm/webm_tracks_parser.h',
+ 'formats/webm/webm_video_client.cc',
+ 'formats/webm/webm_video_client.h',
+ 'formats/webm/webm_webvtt_parser.cc',
'ozone/media_ozone_platform.cc',
'ozone/media_ozone_platform.h',
+ 'renderers/audio_renderer_impl.cc',
+ 'renderers/audio_renderer_impl.h',
+ 'renderers/default_renderer_factory.cc',
+ 'renderers/default_renderer_factory.h',
+ 'renderers/gpu_video_accelerator_factories.h',
+ 'renderers/renderer_impl.cc',
+ 'renderers/renderer_impl.h',
+ 'renderers/video_renderer_impl.cc',
+ 'renderers/video_renderer_impl.h',
'video/capture/android/video_capture_device_android.cc',
'video/capture/android/video_capture_device_android.h',
'video/capture/android/video_capture_device_factory_android.cc',
'video/capture/android/video_capture_device_factory_android.h',
'video/capture/fake_video_capture_device.cc',
'video/capture/fake_video_capture_device.h',
- 'video/capture/fake_video_capture_device_factory.h',
'video/capture/fake_video_capture_device_factory.cc',
+ 'video/capture/fake_video_capture_device_factory.h',
'video/capture/file_video_capture_device.cc',
'video/capture/file_video_capture_device.h',
- 'video/capture/file_video_capture_device_factory.h',
'video/capture/file_video_capture_device_factory.cc',
+ 'video/capture/file_video_capture_device_factory.h',
+ 'video/capture/linux/v4l2_capture_delegate.cc',
+ 'video/capture/linux/v4l2_capture_delegate.h',
+ 'video/capture/linux/v4l2_capture_delegate_multi_plane.cc',
+ 'video/capture/linux/v4l2_capture_delegate_multi_plane.h',
+ 'video/capture/linux/v4l2_capture_delegate_single_plane.cc',
+ 'video/capture/linux/v4l2_capture_delegate_single_plane.h',
+ 'video/capture/linux/video_capture_device_chromeos.cc',
+ 'video/capture/linux/video_capture_device_chromeos.h',
'video/capture/linux/video_capture_device_factory_linux.cc',
'video/capture/linux/video_capture_device_factory_linux.h',
'video/capture/linux/video_capture_device_linux.cc',
'video/capture/linux/video_capture_device_linux.h',
- 'video/capture/linux/video_capture_device_chromeos.cc',
- 'video/capture/linux/video_capture_device_chromeos.h',
'video/capture/mac/platform_video_capturing_mac.h',
'video/capture/mac/video_capture_device_avfoundation_mac.h',
'video/capture/mac/video_capture_device_avfoundation_mac.mm',
@@ -522,8 +568,6 @@
'video/capture/video_capture_device_factory.h',
'video/capture/video_capture_device_info.cc',
'video/capture/video_capture_device_info.h',
- 'video/capture/video_capture_types.cc',
- 'video/capture/video_capture_types.h',
'video/capture/win/capability_list_win.cc',
'video/capture/win/capability_list_win.h',
'video/capture/win/filter_base_win.cc',
@@ -541,37 +585,18 @@
'video/capture/win/video_capture_device_mf_win.h',
'video/capture/win/video_capture_device_win.cc',
'video/capture/win/video_capture_device_win.h',
+ 'video/fake_video_encode_accelerator.cc',
+ 'video/fake_video_encode_accelerator.h',
+ 'video/gpu_memory_buffer_video_frame_pool.cc',
+ 'video/gpu_memory_buffer_video_frame_pool.h',
+ 'video/h264_poc.cc',
+ 'video/h264_poc.h',
'video/picture.cc',
'video/picture.h',
'video/video_decode_accelerator.cc',
'video/video_decode_accelerator.h',
'video/video_encode_accelerator.cc',
'video/video_encode_accelerator.h',
- 'formats/common/offset_byte_queue.cc',
- 'formats/common/offset_byte_queue.h',
- 'formats/webm/webm_audio_client.cc',
- 'formats/webm/webm_audio_client.h',
- 'formats/webm/webm_cluster_parser.cc',
- 'formats/webm/webm_cluster_parser.h',
- 'formats/webm/webm_constants.cc',
- 'formats/webm/webm_constants.h',
- 'formats/webm/webm_content_encodings.cc',
- 'formats/webm/webm_content_encodings.h',
- 'formats/webm/webm_content_encodings_client.cc',
- 'formats/webm/webm_content_encodings_client.h',
- 'formats/webm/webm_crypto_helpers.cc',
- 'formats/webm/webm_crypto_helpers.h',
- 'formats/webm/webm_info_parser.cc',
- 'formats/webm/webm_info_parser.h',
- 'formats/webm/webm_parser.cc',
- 'formats/webm/webm_parser.h',
- 'formats/webm/webm_stream_parser.cc',
- 'formats/webm/webm_stream_parser.h',
- 'formats/webm/webm_tracks_parser.cc',
- 'formats/webm/webm_tracks_parser.h',
- 'formats/webm/webm_video_client.cc',
- 'formats/webm/webm_video_client.h',
- 'formats/webm/webm_webvtt_parser.cc',
'formats/webm/webm_webvtt_parser.h'
],
'direct_dependent_settings': {
@@ -621,12 +646,23 @@
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
],
+ 'defines': [
+ 'MEDIA_DISABLE_FFMPEG',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'MEDIA_DISABLE_FFMPEG',
+ ],
+ },
}],
['media_use_libvpx==1', {
'dependencies': [
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
],
}, { # media_use_libvpx==0
+ 'defines': [
+ 'MEDIA_DISABLE_LIBVPX',
+ ],
'direct_dependent_settings': {
'defines': [
'MEDIA_DISABLE_LIBVPX',
@@ -642,12 +678,14 @@
'sources': [
'base/browser_cdm.cc',
'base/browser_cdm.h',
+ 'base/browser_cdm_factory.cc',
'base/browser_cdm_factory.h',
],
}],
['OS=="android"', {
'dependencies': [
'media_android_jni_headers',
+ 'media_java',
'player_android',
'video_capture_android_jni_headers',
],
@@ -659,20 +697,15 @@
'sources!': [
'filters/opus_audio_decoder.cc',
'filters/opus_audio_decoder.h',
- ],
- 'conditions': [
- ['android_webview_build==0', {
- 'dependencies': [
- 'media_java',
- ],
- }],
+ 'renderers/default_renderer_factory.cc',
+ 'renderers/default_renderer_factory.h',
],
'defines': [
'DISABLE_USER_INPUT_MONITOR',
],
}],
# For VaapiVideoEncodeAccelerator.
- ['target_arch != "arm" and chromeos == 1 and use_x11 == 1', {
+ ['target_arch != "arm" and chromeos == 1', {
'sources': [
'filters/h264_bitstream_buffer.cc',
'filters/h264_bitstream_buffer.h',
@@ -698,6 +731,11 @@
'audio/openbsd/audio_manager_openbsd.cc',
'audio/openbsd/audio_manager_openbsd.h',
],
+ }, { # else: openbsd==1
+ 'sources!': [
+ 'video/capture/linux/v4l2_capture_delegate_multi_plane.cc',
+ 'video/capture/linux/v4l2_capture_delegate_multi_plane.h',
+ ],
}],
['OS=="linux"', {
'conditions': [
@@ -708,6 +746,7 @@
'../build/linux/system.gyp:xext',
'../build/linux/system.gyp:xfixes',
'../build/linux/system.gyp:xtst',
+ '../ui/gfx/x/gfx_x11.gyp:gfx_x11',
],
}, { # else: use_x11==0
'sources!': [
@@ -892,7 +931,6 @@
'$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
'$(SDKROOT)/System/Library/Frameworks/AudioUnit.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreMIDI.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
'$(SDKROOT)/System/Library/Frameworks/OpenGL.framework',
'$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
@@ -994,10 +1032,10 @@
'formats/mpeg/adts_constants.h',
'formats/mpeg/adts_stream_parser.cc',
'formats/mpeg/adts_stream_parser.h',
- 'formats/mpeg/mpeg_audio_stream_parser_base.cc',
- 'formats/mpeg/mpeg_audio_stream_parser_base.h',
'formats/mpeg/mpeg1_audio_stream_parser.cc',
'formats/mpeg/mpeg1_audio_stream_parser.h',
+ 'formats/mpeg/mpeg_audio_stream_parser_base.cc',
+ 'formats/mpeg/mpeg_audio_stream_parser_base.h',
],
}],
['target_arch=="ia32" or target_arch=="x64"', {
@@ -1015,6 +1053,17 @@
'base/keyboard_event_counter.h',
],
}],
+ ['use_low_memory_buffer==1', {
+ 'sources': [
+ 'filters/source_buffer_platform.h',
+ 'filters/source_buffer_platform_lowmem.cc',
+ ]
+ }, { # 'use_low_memory_buffer==0'
+ 'sources': [
+ 'filters/source_buffer_platform.cc',
+ 'filters/source_buffer_platform.h',
+ ]
+ }],
], # conditions
'target_conditions': [
['OS == "ios" and _toolset != "host"', {
@@ -1026,140 +1075,18 @@
['include', '^base/mac/corevideo_glue\\.h$'],
['include', '^base/mac/videotoolbox_glue\\.h$'],
['include', '^base/mac/videotoolbox_glue\\.mm$'],
+ ['include', '^base/mac/video_frame_mac\\.h$'],
+ ['include', '^base/mac/video_frame_mac\\.cc$'],
],
}],
], # target_conditions
},
{
- # GN version: //media/mojo/interfaces
- 'target_name': 'media_mojo_bindings',
- 'type': 'static_library',
- 'sources': [
- 'mojo/interfaces/media_types.mojom',
- 'mojo/interfaces/media_renderer.mojom',
- 'mojo/interfaces/demuxer_stream.mojom',
- ],
- 'includes': [
- '../mojo/public/tools/bindings/mojom_bindings_generator.gypi'
- ],
- 'export_dependent_settings': [
- '../mojo/public/mojo_public.gyp:mojo_cpp_bindings',
- '../mojo/services/public/mojo_services_public.gyp:mojo_geometry_bindings',
- ],
- 'dependencies': [
- '../mojo/public/mojo_public.gyp:mojo_cpp_bindings',
- '../mojo/services/public/mojo_services_public.gyp:mojo_geometry_bindings',
- ],
- },
- {
- 'target_name': 'media_mojo_lib',
- 'type': 'static_library',
- 'includes': [
- '../mojo/mojo_variables.gypi',
- ],
- 'dependencies': [
- 'media',
- 'media_mojo_bindings',
- '../base/base.gyp:base',
- '../mojo/mojo_geometry_converters.gyp:mojo_geometry_lib',
- '../mojo/mojo_base.gyp:mojo_environment_chromium',
- '../mojo/public/mojo_public.gyp:mojo_application_base',
- '../mojo/public/mojo_public.gyp:mojo_application_bindings',
- '../skia/skia.gyp:skia',
- '../ui/gfx/gfx.gyp:gfx_geometry',
- '<(mojo_system_for_component)',
- ],
- 'export_dependent_settings': [
- 'media_mojo_bindings',
- ],
- 'sources': [
- 'mojo/services/media_type_converters.cc',
- 'mojo/services/media_type_converters.h',
- 'mojo/services/mojo_demuxer_stream_impl.cc',
- 'mojo/services/mojo_demuxer_stream_impl.h',
- 'mojo/services/mojo_renderer_impl.cc',
- 'mojo/services/mojo_renderer_impl.h',
- ],
- },
- {
- 'target_name': 'mojo_media_renderer_app',
- 'type': 'loadable_module',
- 'includes': [
- '../mojo/mojo_variables.gypi',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- '../mojo/mojo_base.gyp:mojo_application_chromium',
- '<(mojo_system_for_loadable_module)',
- 'media_mojo_lib',
- 'shared_memory_support',
- ],
- 'sources': [
- 'mojo/services/demuxer_stream_provider_shim.cc',
- 'mojo/services/demuxer_stream_provider_shim.h',
- 'mojo/services/mojo_demuxer_stream_adapter.cc',
- 'mojo/services/mojo_demuxer_stream_adapter.h',
- 'mojo/services/mojo_renderer_service.cc',
- 'mojo/services/mojo_renderer_service.h',
- 'mojo/services/renderer_config.cc',
- 'mojo/services/renderer_config.h',
- 'mojo/services/renderer_config_default.cc',
- ],
- },
- {
- 'target_name': 'media_mojo_lib_unittests',
- 'type': '<(gtest_target_type)',
- 'dependencies': [
- 'media',
- 'media_mojo_bindings',
- 'media_mojo_lib',
- '../base/base.gyp:base',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- '../mojo/edk/mojo_edk.gyp:mojo_run_all_unittests',
- '../mojo/mojo_base.gyp:mojo_environment_chromium',
- ],
- 'sources': [
- 'mojo/services/media_type_converters_unittest.cc',
- ],
- },
- {
- 'target_name': 'mojo_media_renderer_apptest',
- 'type': 'loadable_module',
- 'includes': [
- '../mojo/mojo_variables.gypi',
- ],
- 'dependencies': [
- 'media',
- 'media_mojo_bindings',
- 'media_mojo_lib',
- 'media_test_support',
- 'mojo_media_renderer_app',
- '../base/base.gyp:base',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- '../mojo/mojo_base.gyp:mojo_application_chromium',
- '<(mojo_system_for_loadable_module)',
- ],
- 'sources': [
- 'mojo/services/renderer_unittest.cc',
- ],
- },
- {
- 'target_name': 'media_mojo',
- 'type': 'none',
- 'dependencies': [
- 'media_mojo_lib',
- 'media_mojo_lib_unittests',
- 'mojo_media_renderer_apptest',
- 'mojo_media_renderer_app',
- ]
- },
- {
# GN version: //media:media_unittests
'target_name': 'media_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
+ 'audio_test_config',
'media',
'media_test_support',
'shared_memory_support',
@@ -1167,6 +1094,7 @@
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
'../gpu/gpu.gyp:command_buffer_common',
+ '../gpu/gpu.gyp:gpu_unittest_utils',
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
@@ -1177,42 +1105,13 @@
'../url/url.gyp:url_lib',
],
'sources': [
- 'audio/android/audio_android_unittest.cc',
- 'audio/alsa/alsa_output_unittest.cc',
- 'audio/audio_input_controller_unittest.cc',
- 'audio/audio_input_unittest.cc',
- 'audio/audio_input_volume_unittest.cc',
- 'audio/audio_low_latency_input_output_unittest.cc',
- 'audio/audio_manager_unittest.cc',
- 'audio/audio_output_controller_unittest.cc',
- 'audio/audio_output_device_unittest.cc',
- 'audio/audio_output_proxy_unittest.cc',
- 'audio/audio_parameters_unittest.cc',
- 'audio/audio_power_monitor_unittest.cc',
- 'audio/fake_audio_consumer_unittest.cc',
- 'audio/mac/audio_auhal_mac_unittest.cc',
- 'audio/mac/audio_device_listener_mac_unittest.cc',
- 'audio/mac/audio_low_latency_input_mac_unittest.cc',
- 'audio/simple_sources_unittest.cc',
- 'audio/sounds/audio_stream_handler_unittest.cc',
- 'audio/sounds/sounds_manager_unittest.cc',
- 'audio/sounds/test_data.cc',
- 'audio/sounds/test_data.h',
- 'audio/sounds/wav_audio_handler_unittest.cc',
- 'audio/virtual_audio_input_stream_unittest.cc',
- 'audio/virtual_audio_output_stream_unittest.cc',
- 'audio/win/audio_device_listener_win_unittest.cc',
- 'audio/win/audio_low_latency_input_win_unittest.cc',
- 'audio/win/audio_low_latency_output_win_unittest.cc',
- 'audio/win/audio_output_win_unittest.cc',
- 'audio/win/core_audio_util_win_unittest.cc',
'base/android/media_codec_bridge_unittest.cc',
'base/android/media_drm_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
'base/audio_block_fifo_unittest.cc',
'base/audio_buffer_converter_unittest.cc',
- 'base/audio_buffer_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
+ 'base/audio_buffer_unittest.cc',
'base/audio_bus_unittest.cc',
'base/audio_converter_unittest.cc',
'base/audio_discard_helper_unittest.cc',
@@ -1222,6 +1121,7 @@
'base/audio_pull_fifo_unittest.cc',
'base/audio_renderer_mixer_input_unittest.cc',
'base/audio_renderer_mixer_unittest.cc',
+ 'base/audio_shifter_unittest.cc',
'base/audio_splicer_unittest.cc',
'base/audio_timestamp_helper_unittest.cc',
'base/audio_video_metadata_extractor_unittest.cc',
@@ -1236,15 +1136,19 @@
'base/decoder_buffer_queue_unittest.cc',
'base/decoder_buffer_unittest.cc',
'base/djb2_unittest.cc',
+ 'base/fake_demuxer_stream_unittest.cc',
'base/gmock_callback_support_unittest.cc',
+ 'base/key_systems_unittest.cc',
+ 'base/mac/video_frame_mac_unittests.cc',
'base/media_file_checker_unittest.cc',
+ 'base/moving_average_unittest.cc',
'base/multi_channel_resampler_unittest.cc',
+ 'base/null_video_sink_unittest.cc',
'base/pipeline_unittest.cc',
'base/ranges_unittest.cc',
'base/run_all_unittests.cc',
- 'base/scoped_histogram_timer_unittest.cc',
- 'base/serial_runner_unittest.cc',
'base/seekable_buffer_unittest.cc',
+ 'base/serial_runner_unittest.cc',
'base/sinc_resampler_unittest.cc',
'base/stream_parser_unittest.cc',
'base/text_ranges_unittest.cc',
@@ -1253,28 +1157,26 @@
'base/user_input_monitor_unittest.cc',
'base/vector_math_testing.h',
'base/vector_math_unittest.cc',
- 'base/video_frame_unittest.cc',
'base/video_frame_pool_unittest.cc',
+ 'base/video_frame_unittest.cc',
'base/video_util_unittest.cc',
'base/wall_clock_time_source_unittest.cc',
'base/yuv_convert_unittest.cc',
+ 'blink/skcanvas_video_renderer_unittest.cc',
'cdm/aes_decryptor_unittest.cc',
+ 'cdm/cenc_utils_unittest.cc',
'cdm/json_web_key_unittest.cc',
'ffmpeg/ffmpeg_common_unittest.cc',
'filters/audio_clock_unittest.cc',
- 'filters/audio_decoder_unittest.cc',
'filters/audio_decoder_selector_unittest.cc',
+ 'filters/audio_decoder_unittest.cc',
'filters/audio_file_reader_unittest.cc',
'filters/audio_renderer_algorithm_unittest.cc',
- 'filters/audio_renderer_impl_unittest.cc',
'filters/blocking_url_protocol_unittest.cc',
'filters/chunk_demuxer_unittest.cc',
'filters/decrypting_audio_decoder_unittest.cc',
'filters/decrypting_demuxer_stream_unittest.cc',
'filters/decrypting_video_decoder_unittest.cc',
- 'filters/fake_demuxer_stream.cc',
- 'filters/fake_demuxer_stream.h',
- 'filters/fake_demuxer_stream_unittest.cc',
'filters/fake_video_decoder.cc',
'filters/fake_video_decoder.h',
'filters/fake_video_decoder_unittest.cc',
@@ -1286,28 +1188,19 @@
'filters/h264_bit_reader_unittest.cc',
'filters/h264_parser_unittest.cc',
'filters/in_memory_url_protocol_unittest.cc',
- 'filters/pipeline_integration_test.cc',
- 'filters/pipeline_integration_test_base.cc',
- 'filters/renderer_impl_unittest.cc',
- 'filters/skcanvas_video_renderer_unittest.cc',
+ 'filters/jpeg_parser_unittest.cc',
'filters/source_buffer_stream_unittest.cc',
+ 'filters/video_cadence_estimator_unittest.cc',
'filters/video_decoder_selector_unittest.cc',
- 'filters/video_frame_scheduler_impl_unittest.cc',
- 'filters/video_frame_scheduler_unittest.cc',
'filters/video_frame_stream_unittest.cc',
- 'filters/video_renderer_impl_unittest.cc',
- 'midi/midi_manager_unittest.cc',
- 'midi/midi_manager_usb_unittest.cc',
- 'midi/midi_message_queue_unittest.cc',
- 'midi/midi_message_util_unittest.cc',
- 'midi/usb_midi_descriptor_parser_unittest.cc',
- 'midi/usb_midi_input_stream_unittest.cc',
- 'midi/usb_midi_output_stream_unittest.cc',
- 'video/capture/fake_video_capture_device_unittest.cc',
- 'video/capture/video_capture_device_unittest.cc',
+ 'filters/video_renderer_algorithm_unittest.cc',
+ 'filters/vp8_bool_decoder_unittest.cc',
+ 'filters/vp8_parser_unittest.cc',
'formats/common/offset_byte_queue_unittest.cc',
'formats/webm/cluster_builder.cc',
'formats/webm/cluster_builder.h',
+ 'formats/webm/opus_packet_builder.cc',
+ 'formats/webm/opus_packet_builder.h',
'formats/webm/tracks_builder.cc',
'formats/webm/tracks_builder.h',
'formats/webm/webm_cluster_parser_unittest.cc',
@@ -1315,6 +1208,15 @@
'formats/webm/webm_parser_unittest.cc',
'formats/webm/webm_tracks_parser_unittest.cc',
'formats/webm/webm_webvtt_parser_unittest.cc',
+ 'renderers/audio_renderer_impl_unittest.cc',
+ 'renderers/renderer_impl_unittest.cc',
+ 'renderers/video_renderer_impl_unittest.cc',
+ 'test/pipeline_integration_test.cc',
+ 'test/pipeline_integration_test_base.cc',
+ 'video/capture/fake_video_capture_device_unittest.cc',
+ 'video/capture/video_capture_device_unittest.cc',
+ 'video/h264_poc_unittest.cc',
+ 'video/gpu_memory_buffer_video_frame_pool_unittest.cc',
],
'include_dirs': [
# Needed by media_drm_bridge.cc.
@@ -1345,18 +1247,8 @@
'filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc',
'filters/ffmpeg_video_decoder_unittest.cc',
'filters/in_memory_url_protocol_unittest.cc',
- 'filters/pipeline_integration_test.cc',
- 'filters/pipeline_integration_test_base.cc',
- ],
- }],
- ['use_alsa==1', {
- 'defines': [
- 'USE_ALSA',
- ],
- }],
- ['use_pulseaudio==1', {
- 'defines': [
- 'USE_PULSEAUDIO',
+ 'test/pipeline_integration_test.cc',
+ 'test/pipeline_integration_test_base.cc',
],
}],
['os_posix==1 and OS!="mac"', {
@@ -1369,38 +1261,16 @@
],
}],
['OS=="android"', {
- 'sources!': [
- 'audio/audio_input_volume_unittest.cc',
- ],
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
'player_android',
],
}],
- ['OS=="linux"', {
- 'conditions': [
- ['use_cras==1', {
- 'sources': [
- 'audio/cras/cras_input_unittest.cc',
- 'audio/cras/cras_unified_unittest.cc',
- ],
- 'defines': [
- 'USE_CRAS',
- ],
- }],
- ],
- }],
['target_arch != "arm" and chromeos == 1 and use_x11 == 1', {
'sources': [
'filters/h264_bitstream_buffer_unittest.cc',
],
}],
- ['use_alsa==0', {
- 'sources!': [
- 'audio/alsa/alsa_output_unittest.cc',
- 'audio/audio_low_latency_input_output_unittest.cc',
- ],
- }],
['target_arch=="ia32" or target_arch=="x64"', {
'sources': [
'base/simd/convert_rgb_to_yuv_unittest.cc',
@@ -1432,16 +1302,16 @@
'formats/mpeg/mpeg1_audio_stream_parser_unittest.cc',
],
}],
- # TODO(wolenetz): Fix size_t to int truncations in win64. See
- # http://crbug.com/171009
- ['OS=="win" and target_arch=="x64"', {
- 'msvs_disabled_warnings': [ 4267, ],
- }],
['OS=="mac"', {
'sources': [
'video/capture/mac/video_capture_device_factory_mac_unittest.mm',
]
}],
+ ['use_x11==1', {
+ 'dependencies': [
+ '../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
+ ],
+ }],
],
},
{
@@ -1468,8 +1338,8 @@
'base/sinc_resampler_perftest.cc',
'base/vector_math_perftest.cc',
'base/yuv_convert_perftest.cc',
- 'filters/pipeline_integration_perftest.cc',
- 'filters/pipeline_integration_test_base.cc',
+ 'test/pipeline_integration_perftest.cc',
+ 'test/pipeline_integration_test_base.cc',
],
'conditions': [
['arm_neon==1', {
@@ -1490,14 +1360,120 @@
}, { # media_use_ffmpeg==0
'sources!': [
'base/demuxer_perftest.cc',
- 'filters/pipeline_integration_perftest.cc',
- 'filters/pipeline_integration_test_base.cc',
+ 'test/pipeline_integration_perftest.cc',
+ 'test/pipeline_integration_test_base.cc',
],
}],
],
},
{
- # GN version: //media:test_support
+ # GN version: //media/audio:unittests
+ # For including the sources and configs in multiple test targets.
+ 'target_name': 'audio_test_config',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'sources': [
+ 'audio/audio_input_controller_unittest.cc',
+ 'audio/audio_input_unittest.cc',
+ 'audio/audio_manager_factory_unittest.cc',
+ 'audio/audio_manager_unittest.cc',
+ 'audio/audio_output_controller_unittest.cc',
+ 'audio/audio_output_device_unittest.cc',
+ 'audio/audio_output_proxy_unittest.cc',
+ 'audio/audio_parameters_unittest.cc',
+ 'audio/audio_power_monitor_unittest.cc',
+ 'audio/fake_audio_worker_unittest.cc',
+ 'audio/simple_sources_unittest.cc',
+ 'audio/virtual_audio_input_stream_unittest.cc',
+ 'audio/virtual_audio_output_stream_unittest.cc',
+ ],
+ 'conditions': [
+ # TODO(wolenetz): Fix size_t to int truncations in win64. See
+ # http://crbug.com/171009
+ ['OS=="win" and target_arch=="x64"', {
+ 'msvs_disabled_warnings': [ 4267, ],
+ }],
+ ['OS=="android"', {
+ 'sources': [
+ 'audio/android/audio_android_unittest.cc',
+ ],
+ }, {
+ 'sources': [
+ 'audio/audio_input_volume_unittest.cc',
+ ],
+ }],
+ ['OS=="mac"', {
+ 'sources': [
+ 'audio/mac/audio_auhal_mac_unittest.cc',
+ 'audio/mac/audio_device_listener_mac_unittest.cc',
+ 'audio/mac/audio_low_latency_input_mac_unittest.cc',
+ ],
+ }],
+ ['chromeos==1', {
+ 'sources': [
+ 'audio/sounds/audio_stream_handler_unittest.cc',
+ 'audio/sounds/sounds_manager_unittest.cc',
+ 'audio/sounds/test_data.cc',
+ 'audio/sounds/test_data.h',
+ 'audio/sounds/wav_audio_handler_unittest.cc',
+ ],
+ }],
+ ['OS=="win"', {
+ 'sources': [
+ 'audio/win/audio_device_listener_win_unittest.cc',
+ 'audio/win/audio_low_latency_input_win_unittest.cc',
+ 'audio/win/audio_low_latency_output_win_unittest.cc',
+ 'audio/win/audio_output_win_unittest.cc',
+ 'audio/win/core_audio_util_win_unittest.cc',
+ ],
+ }],
+ ['use_alsa==1', {
+ 'sources': [
+ 'audio/alsa/alsa_output_unittest.cc',
+ 'audio/audio_low_latency_input_output_unittest.cc',
+ ],
+ 'defines': [
+ 'USE_ALSA',
+ ],
+ }],
+ ['use_pulseaudio==1', {
+ 'defines': [
+ 'USE_PULSEAUDIO',
+ ],
+ }],
+ ['use_cras==1', {
+ 'sources': [
+ 'audio/cras/cras_input_unittest.cc',
+ 'audio/cras/cras_unified_unittest.cc',
+ ],
+ 'defines': [
+ 'USE_CRAS',
+ ],
+ }],
+ ],
+ },
+ },
+ {
+ # GN version: //media:audio_unittests
+ # For running the subset of media_unittests that might require audio
+ # hardware separately on GPU bots. media_unittests includes these too.
+ 'target_name': 'audio_unittests',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'audio_test_config',
+ 'media_test_support',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../ui/gfx/gfx.gyp:gfx_test_support',
+ ],
+ 'sources': [
+ 'base/run_all_unittests.cc',
+ ],
+ },
+ {
+ # GN versions (it is split apart): //media:test_support,
+ # //media/base:test_support, and //media/audio:test_support
'target_name': 'media_test_support',
'type': 'static_library',
'dependencies': [
@@ -1509,6 +1485,8 @@
'../testing/gtest.gyp:gtest',
],
'sources': [
+ 'audio/audio_unittest_util.cc',
+ 'audio/audio_unittest_util.h',
'audio/mock_audio_manager.cc',
'audio/mock_audio_manager.h',
'audio/mock_audio_source_callback.cc',
@@ -1519,6 +1497,8 @@
'base/fake_audio_render_callback.h',
'base/fake_audio_renderer_sink.cc',
'base/fake_audio_renderer_sink.h',
+ 'base/fake_demuxer_stream.cc',
+ 'base/fake_demuxer_stream.h',
'base/fake_text_track_stream.cc',
'base/fake_text_track_stream.h',
'base/gmock_callback_support.h',
@@ -1528,16 +1508,14 @@
'base/mock_demuxer_host.h',
'base/mock_filters.cc',
'base/mock_filters.h',
+ 'base/null_video_sink.cc',
+ 'base/null_video_sink.h',
'base/test_data_util.cc',
'base/test_data_util.h',
'base/test_helpers.cc',
'base/test_helpers.h',
- 'filters/clockless_video_frame_scheduler.cc',
- 'filters/clockless_video_frame_scheduler.h',
- 'filters/mock_gpu_video_accelerator_factories.cc',
- 'filters/mock_gpu_video_accelerator_factories.h',
- 'filters/test_video_frame_scheduler.cc',
- 'filters/test_video_frame_scheduler.h',
+ 'renderers/mock_gpu_video_accelerator_factories.cc',
+ 'renderers/mock_gpu_video_accelerator_factories.h',
'video/mock_video_decode_accelerator.cc',
'video/mock_video_decode_accelerator.h',
],
@@ -1662,6 +1640,7 @@
],
},
{
+ # GN version: //media/base:media_sse2
'target_name': 'media_sse2',
'type': 'static_library',
'cflags': [
@@ -1681,44 +1660,6 @@
},
], # targets
}],
- ['use_x11==1', {
- 'targets': [
- {
- 'target_name': 'player_x11',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- 'shared_memory_support',
- '../base/base.gyp:base',
- '../ui/gl/gl.gyp:gl',
- '../ui/gfx/gfx.gyp:gfx',
- '../ui/gfx/gfx.gyp:gfx_geometry',
- '../build/linux/system.gyp:x11',
- '../build/linux/system.gyp:xext',
- '../build/linux/system.gyp:xrender',
- ],
- 'conditions': [
- # Linux/Solaris need libdl for dlopen() and friends.
- ['OS=="linux" or OS=="solaris"', {
- 'link_settings': {
- 'libraries': [
- '-ldl',
- ],
- },
- }],
- ],
- 'sources': [
- 'tools/player_x11/data_source_logger.cc',
- 'tools/player_x11/data_source_logger.h',
- 'tools/player_x11/gl_video_renderer.cc',
- 'tools/player_x11/gl_video_renderer.h',
- 'tools/player_x11/player_x11.cc',
- 'tools/player_x11/x11_video_renderer.cc',
- 'tools/player_x11/x11_video_renderer.h',
- ],
- },
- ],
- }],
['OS=="android"', {
'targets': [
{
@@ -1758,8 +1699,6 @@
'base/android/java/src/org/chromium/media/MediaDrmBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerListener.java',
- 'base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java',
- 'base/android/java/src/org/chromium/media/UsbMidiDeviceFactoryAndroid.java',
'base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java',
],
'variables': {
@@ -1788,16 +1727,23 @@
'base/android/audio_decoder_job.cc',
'base/android/audio_decoder_job.h',
'base/android/browser_cdm_factory_android.cc',
+ 'base/android/browser_cdm_factory_android.h',
'base/android/demuxer_android.h',
'base/android/demuxer_stream_player_params.cc',
'base/android/demuxer_stream_player_params.h',
+ 'base/android/media_client_android.cc',
+ 'base/android/media_client_android.h',
'base/android/media_codec_bridge.cc',
'base/android/media_codec_bridge.h',
+ 'base/android/media_codec_player.cc',
+ 'base/android/media_codec_player.h',
'base/android/media_common_android.h',
'base/android/media_decoder_job.cc',
'base/android/media_decoder_job.h',
'base/android/media_drm_bridge.cc',
'base/android/media_drm_bridge.h',
+ 'base/android/media_drm_bridge_delegate.cc',
+ 'base/android/media_drm_bridge_delegate.h',
'base/android/media_jni_registrar.cc',
'base/android/media_jni_registrar.h',
'base/android/media_player_android.cc',
@@ -1839,6 +1785,7 @@
'type': 'none',
'dependencies': [
'../base/base.gyp:base',
+ 'media_android_captureapitype',
'media_android_imageformat',
],
'export_dependent_settings': [
@@ -1850,6 +1797,15 @@
'includes': ['../build/java.gypi'],
},
{
+ # GN: //media/base/android:media_android_captureapitype
+ 'target_name': 'media_android_captureapitype',
+ 'type': 'none',
+ 'variables': {
+ 'source_file': 'video/capture/video_capture_device.h',
+ },
+ 'includes': [ '../build/android/java_cpp_enum.gypi' ],
+ },
+ {
# GN: //media/base/android:media_android_imageformat
'target_name': 'media_android_imageformat',
'type': 'none',
@@ -1863,23 +1819,6 @@
['media_use_ffmpeg==1', {
'targets': [
{
- # GN version: //media:ffmpeg_unittests
- 'target_name': 'ffmpeg_unittests',
- 'type': 'executable',
- 'dependencies': [
- '../base/base.gyp:base',
- '../base/base.gyp:base_i18n',
- '../base/base.gyp:test_support_base',
- '../testing/gtest.gyp:gtest',
- '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
- 'media',
- 'media_test_support',
- ],
- 'sources': [
- 'ffmpeg/ffmpeg_unittest.cc',
- ],
- },
- {
# GN version: //media:ffmpeg_regression_tests
'target_name': 'ffmpeg_regression_tests',
'type': 'executable',
@@ -1895,7 +1834,7 @@
'sources': [
'base/run_all_unittests.cc',
'ffmpeg/ffmpeg_regression_tests.cc',
- 'filters/pipeline_integration_test_base.cc',
+ 'test/pipeline_integration_test_base.cc',
],
'conditions': [
['os_posix==1 and OS!="mac"', {
@@ -1924,10 +1863,16 @@
'shared_memory_support',
],
'defines': [
- 'MEDIA_IMPLEMENTATION',
+ 'MEDIA_DISABLE_FFMPEG',
+ 'MEDIA_DISABLE_LIBVPX',
'MEDIA_FOR_CAST_IOS',
+ 'MEDIA_IMPLEMENTATION',
],
'direct_dependent_settings': {
+ 'defines': [
+ 'MEDIA_DISABLE_FFMPEG',
+ 'MEDIA_DISABLE_LIBVPX',
+ ],
'include_dirs': [
'..',
],
@@ -1939,10 +1884,24 @@
'base/mac/coremedia_glue.h',
'base/mac/coremedia_glue.mm',
'base/mac/corevideo_glue.h',
+ 'base/mac/video_frame_mac.cc',
+ 'base/mac/video_frame_mac.h',
'base/mac/videotoolbox_glue.h',
'base/mac/videotoolbox_glue.mm',
+ 'base/simd/convert_rgb_to_yuv.h',
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_yuv_to_rgb.h',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
'base/video_frame.cc',
'base/video_frame.h',
+ 'base/video_frame_metadata.cc',
+ 'base/video_frame_metadata.h',
+ 'base/video_util.cc',
+ 'base/video_util.h',
+ 'base/yuv_convert.cc',
+ 'base/yuv_convert.h',
],
'link_settings': {
'libraries': [
@@ -1966,13 +1925,15 @@
['include', '^base/mac/corevideo_glue\\.h$'],
['include', '^base/mac/videotoolbox_glue\\.h$'],
['include', '^base/mac/videotoolbox_glue\\.mm$'],
+ ['include', '^base/mac/video_frame_mac\\.h$'],
+ ['include', '^base/mac/video_frame_mac\\.cc$'],
],
}],
], # target_conditions
},
],
}],
- ['test_isolation_mode != "noop" and archive_gpu_tests==1', {
+ ['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'media_unittests_run',
@@ -1987,6 +1948,19 @@
'media_unittests.isolate',
],
},
+ {
+ 'target_name': 'audio_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'audio_unittests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'audio_unittests.isolate',
+ ],
+ },
],
}],
],
diff --git a/chromium/media/media_cdm.gypi b/chromium/media/media_cdm.gypi
index 0c2510bfd3d..1068aa1580f 100644
--- a/chromium/media/media_cdm.gypi
+++ b/chromium/media/media_cdm.gypi
@@ -25,6 +25,7 @@
],
'targets': [
{
+ # GN version: //media/cdm/ppapi:clearkeycdm
'target_name': 'clearkeycdm',
'type': 'none',
# TODO(tomfinegan): Simplify this by unconditionally including all the
@@ -97,21 +98,12 @@
'msvs_disabled_warnings': [ 4267, ],
},
{
+ # GN version: //media/cdm/ppapi:clearkeycdmadapter_resources
'target_name': 'clearkeycdmadapter_resources',
'type': 'none',
- 'conditions': [
- ['branding == "Chrome"', {
- 'variables': {
- 'branding_path': '../chrome/app/theme/google_chrome/BRANDING',
- },
- }, { # else branding!="Chrome"
- 'variables': {
- 'branding_path': '../chrome/app/theme/chromium/BRANDING',
- },
- }],
- ],
'variables': {
'output_dir': '.',
+ 'branding_path': '../chrome/app/theme/<(branding_path_component)/BRANDING',
'template_input_path': '../chrome/app/chrome_version.rc.version',
'extra_variable_files_arguments':
[ '-f', 'cdm/ppapi/external_clear_key/BRANDING' ],
@@ -125,6 +117,7 @@
],
},
{
+ # GN version: //media/cdm/ppapi:clearkeycdmadapter
'target_name': 'clearkeycdmadapter',
'type': 'none',
# Check whether the plugin's origin URL is valid.
@@ -143,6 +136,7 @@
# Because clearkeycdm has type 'loadable_module' (see comments),
# we must explicitly specify this dependency.
'libraries': [
+ '-lrt',
# Built by clearkeycdm.
'<(PRODUCT_DIR)/libclearkeycdm.so',
],
diff --git a/chromium/media/media_cdm_adapter.gyp b/chromium/media/media_cdm_adapter.gyp
index e71efcc265d..d8338190369 100644
--- a/chromium/media/media_cdm_adapter.gyp
+++ b/chromium/media/media_cdm_adapter.gyp
@@ -16,6 +16,8 @@
['enable_pepper_cdms==1', {
'targets': [
{
+ # GN version: Use the template cdm_adapter in
+ # //media/cdm/ppapi/cdm_adapter.gni.
'target_name': 'cdmadapter',
'type': 'none',
'direct_dependent_settings': {
diff --git a/chromium/media/media_nacl.gyp b/chromium/media/media_nacl.gyp
index 9b82b5eb0cf..9697a8a21f0 100644
--- a/chromium/media/media_nacl.gyp
+++ b/chromium/media/media_nacl.gyp
@@ -27,7 +27,6 @@
'dependencies': [
'../base/base_nacl.gyp:base_nacl',
'../base/base_nacl.gyp:base_nacl_nonsfi',
- '../native_client/tools.gyp:prep_toolchain',
],
'defines': [
'MEDIA_IMPLEMENTATION',
@@ -51,9 +50,6 @@
'build_newlib': 0,
'build_pnacl_newlib': 1,
},
- 'dependencies': [
- '../native_client/tools.gyp:prep_toolchain',
- ],
'sources': [
'base/media.cc',
'base/media.h',
@@ -64,8 +60,6 @@
'base/simd/convert_yuv_to_rgb_c.cc',
'base/simd/filter_yuv.h',
'base/simd/filter_yuv_c.cc',
- 'base/simd/yuv_to_rgb_table.cc',
- 'base/simd/yuv_to_rgb_table.h',
'base/yuv_convert.cc',
'base/yuv_convert.h',
],
diff --git a/chromium/media/media_options.gni b/chromium/media/media_options.gni
index c2b47093078..746b5412208 100644
--- a/chromium/media/media_options.gni
+++ b/chromium/media/media_options.gni
@@ -45,9 +45,8 @@ declare_args() {
# default since it's not available on the normal Web Platform and costs money.
enable_mpeg2ts_stream_parser = false
- # Enables browser side Content Decryption Modules. Required for android where
- # the typical PPAPI based CDM is not available.
- enable_browser_cdms = is_android
+ # Experiment to enable mojo based media renderer: http://crbug.com/431776
+ enable_media_mojo_renderer = false
# TODO(GYP): This should be a platform define.
is_openbsd = false
diff --git a/chromium/media/media_unittests.isolate b/chromium/media/media_unittests.isolate
index 3a17689503d..449fa888701 100644
--- a/chromium/media/media_unittests.isolate
+++ b/chromium/media/media_unittests.isolate
@@ -6,6 +6,37 @@
'../base/base.isolate',
],
'conditions': [
+ ['use_x11==0', {
+ 'variables': {
+ 'command': [
+ '../testing/test_env.py',
+ '<(PRODUCT_DIR)/media_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ }],
+ ['use_x11==1', {
+ 'variables': {
+ 'command': [
+ '../testing/xvfb.py',
+ '<(PRODUCT_DIR)',
+ '<(PRODUCT_DIR)/media_unittests',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ 'files': [
+ '../testing/xvfb.py',
+ '<(PRODUCT_DIR)/xdisplaycheck',
+ ],
+ },
+ }],
['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
'variables': {
'files': [
@@ -15,15 +46,26 @@
}],
['OS=="linux" or OS=="mac" or OS=="win"', {
'variables': {
- 'command': [
+ 'files': [
+ '../testing/test_env.py',
'<(PRODUCT_DIR)/media_unittests<(EXECUTABLE_SUFFIX)',
],
+ },
+ }],
+ ['OS=="linux"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/libffmpegsumo.so',
+ ],
+ },
+ }],
+ ['OS=="mac"', {
+ 'variables': {
'files': [
- '<(PRODUCT_DIR)/media_unittests<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/ffmpegsumo.so',
],
},
}],
- # TODO(jmadill): should be automatic. http://crbug.com/418146
['OS=="win"', {
'variables': {
'files': [
@@ -31,17 +73,18 @@
],
},
}],
- ['OS=="mac"', {
+ ['OS=="mac" and asan==1 and fastbuild==0', {
'variables': {
'files': [
- '<(PRODUCT_DIR)/ffmpegsumo.so',
+ '<(PRODUCT_DIR)/ffmpegsumo.so.dSYM/',
+ '<(PRODUCT_DIR)/media_unittests.dSYM/',
],
},
}],
- ['OS=="linux"', {
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
'variables': {
'files': [
- '<(PRODUCT_DIR)/libffmpegsumo.so',
+ '<(PRODUCT_DIR)/media_unittests.exe.pdb',
],
},
}],
diff --git a/chromium/media/midi/BUILD.gn b/chromium/media/midi/BUILD.gn
new file mode 100644
index 00000000000..8a043a5762f
--- /dev/null
+++ b/chromium/media/midi/BUILD.gn
@@ -0,0 +1,173 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/config.gni")
+import("//build/config/features.gni")
+import("//build/config/ui.gni")
+import("//media/media_options.gni")
+import("//testing/test.gni")
+
+if (is_android) {
+ # For generate_jni().
+ import("//build/config/android/rules.gni")
+}
+
+# Common configuration for targets in the media/midi directory.
+config("midi_config") {
+ if (use_alsa) {
+ defines = [
+ "USE_ALSA",
+ "USE_UDEV",
+ ]
+ }
+
+ visibility = [ ":*" ]
+}
+
+# USB MIDI specific source files that are used in Android port, and unit tests.
+# These can't easily be a source set because "midi" is a component and the
+# component export flags must always be the same.
+usb_midi_sources = [
+ "midi_manager_usb.cc",
+ "midi_manager_usb.h",
+ "usb_midi_descriptor_parser.cc",
+ "usb_midi_descriptor_parser.h",
+ "usb_midi_device.h",
+ "usb_midi_export.h",
+ "usb_midi_input_stream.cc",
+ "usb_midi_input_stream.h",
+ "usb_midi_jack.h",
+ "usb_midi_output_stream.cc",
+ "usb_midi_output_stream.h",
+]
+
+# Android specific Java source files that are used to generate jni header files.
+if (is_android) {
+ android_library("midi_java") {
+ deps = [
+ "//base:base_java",
+ ]
+
+ DEPRECATED_java_in_dir = "java/src"
+ }
+
+ generate_jni("midi_jni_headers") {
+ sources = [
+ "java/src/org/chromium/media/midi/UsbMidiDeviceAndroid.java",
+ "java/src/org/chromium/media/midi/UsbMidiDeviceFactoryAndroid.java",
+ ]
+ jni_package = "media/midi"
+ }
+}
+
+component("midi") {
+ sources = [
+ "midi_export.h",
+ "midi_manager.cc",
+ "midi_manager.h",
+ "midi_manager_mac.cc",
+ "midi_manager_mac.h",
+ "midi_manager_win.cc",
+ "midi_manager_win.h",
+ "midi_message_queue.cc",
+ "midi_message_queue.h",
+ "midi_message_util.cc",
+ "midi_message_util.h",
+ "midi_port_info.cc",
+ "midi_port_info.h",
+ "midi_scheduler.cc",
+ "midi_scheduler.h",
+ ]
+
+ configs += [ ":midi_config" ]
+
+ defines = [ "MIDI_IMPLEMENTATION" ]
+ deps = [
+ "//base",
+ ]
+ libs = []
+
+ if (is_android) {
+ sources += [
+ "midi_jni_registrar.cc",
+ "midi_jni_registrar.h",
+ "midi_manager_android.cc",
+ "usb_midi_device_android.cc",
+ "usb_midi_device_android.h",
+ "usb_midi_device_factory_android.cc",
+ "usb_midi_device_factory_android.h",
+ ] + usb_midi_sources
+
+ # Since the USB sources are part of the component.
+ defines += [ "EXPORT_USB_MIDI" ]
+
+ deps += [
+ "//base:i18n",
+ ":midi_jni_headers",
+ ]
+ }
+
+ if (is_mac) {
+ libs += [
+ "CoreAudio.framework",
+ "CoreMIDI.framework",
+ ]
+ }
+
+ if (is_win) {
+ deps += [ "//device/usb" ]
+ }
+
+ if (use_alsa && use_udev) {
+ deps += [
+ "//crypto",
+ "//crypto:platform",
+ ]
+ libs += [ "asound" ]
+ sources += [
+ "midi_manager_alsa.cc",
+ "midi_manager_alsa.h",
+ ]
+ }
+
+ if (use_udev) {
+ deps += [ "//device/udev_linux" ]
+ }
+}
+
+test("midi_unittests") {
+ sources = [
+ "midi_manager_unittest.cc",
+ "midi_manager_usb_unittest.cc",
+ "midi_message_queue_unittest.cc",
+ "midi_message_util_unittest.cc",
+ "usb_midi_descriptor_parser_unittest.cc",
+ "usb_midi_input_stream_unittest.cc",
+ "usb_midi_output_stream_unittest.cc",
+ ]
+
+ configs += [ ":midi_config" ]
+ deps = [
+ ":midi",
+ "//base/test/:run_all_unittests",
+ "//base/test/:test_support",
+ "//testing/gtest",
+ ]
+
+ if (!is_android) {
+ sources += usb_midi_sources
+ }
+
+ if (is_mac) {
+ sources += [ "midi_manager_mac_unittest.cc" ]
+ }
+
+ if (use_alsa && use_udev) {
+ sources += [ "midi_manager_alsa_unittest.cc" ]
+ }
+
+ if (use_x11) {
+ deps += [ "//tools/xdisplaycheck" ]
+ }
+}
diff --git a/chromium/media/midi/midi.gyp b/chromium/media/midi/midi.gyp
new file mode 100644
index 00000000000..ec5448d852d
--- /dev/null
+++ b/chromium/media/midi/midi.gyp
@@ -0,0 +1,237 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ 'conditions': [
+ ['(OS=="linux" or OS=="freebsd" or OS=="solaris") and (embedded!=1 or (chromecast==1 and target_arch!="arm"))', {
+ 'use_alsa%': 1,
+ }, {
+ 'use_alsa%': 0,
+ }],
+ ],
+ # Source files that are used in production code for Android, and in tests
+ # for others.
+ 'usb_midi_sources': [
+ 'midi_manager_usb.cc',
+ 'midi_manager_usb.h',
+ 'usb_midi_descriptor_parser.cc',
+ 'usb_midi_descriptor_parser.h',
+ 'usb_midi_device.h',
+ 'usb_midi_export.h',
+ 'usb_midi_input_stream.cc',
+ 'usb_midi_input_stream.h',
+ 'usb_midi_jack.h',
+ 'usb_midi_output_stream.cc',
+ 'usb_midi_output_stream.h',
+ ],
+ },
+ 'targets': [
+ {
+ # GN version: //media/midi
+ 'target_name': 'midi',
+ 'type': '<(component)',
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ ],
+ 'defines': [
+ 'MIDI_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'midi_export.h',
+ 'midi_manager.cc',
+ 'midi_manager.h',
+ 'midi_manager_android.cc',
+ 'midi_manager_mac.cc',
+ 'midi_manager_mac.h',
+ 'midi_manager_win.cc',
+ 'midi_manager_win.h',
+ 'midi_message_queue.cc',
+ 'midi_message_queue.h',
+ 'midi_message_util.cc',
+ 'midi_message_util.h',
+ 'midi_port_info.cc',
+ 'midi_port_info.h',
+ 'midi_scheduler.cc',
+ 'midi_scheduler.h',
+ 'usb_midi_device_android.cc',
+ 'usb_midi_device_android.h',
+ 'usb_midi_device_factory_android.cc',
+ 'usb_midi_device_factory_android.h',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../..',
+ ],
+ },
+ 'conditions': [
+ ['use_alsa==1 and use_udev==1', {
+ 'dependencies': [
+ '../../crypto/crypto.gyp:crypto',
+ '../../device/udev_linux/udev.gyp:udev_linux',
+ ],
+ 'defines': [
+ 'USE_ALSA',
+ 'USE_UDEV',
+ ],
+ 'sources': [
+ 'midi_manager_alsa.cc',
+ 'midi_manager_alsa.h',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lasound',
+ ],
+ },
+ }],
+ ['OS=="android"', {
+ 'dependencies': [
+ '../../base/base.gyp:base_i18n',
+ 'midi_jni_headers',
+ 'midi_java',
+ ],
+ 'sources': [
+ '<@(usb_midi_sources)',
+ 'midi_jni_registrar.cc',
+ 'midi_jni_registrar.h',
+ ],
+ 'defines': [
+ 'EXPORT_USB_MIDI',
+ ],
+ }],
+ ['OS=="mac"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreMIDI.framework',
+ ],
+ },
+ }],
+ ['OS=="win"', {
+ 'dependencies': [
+ '../../device/usb/usb.gyp:device_usb',
+ ],
+ }],
+ ], # conditions
+ },
+ {
+ # GN version: //media/midi:midi_unittests
+ 'target_name': 'midi_unittests',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'midi',
+ '../../base/base.gyp:base',
+ '../../base/base.gyp:run_all_unittests',
+ '../../testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'midi_manager_unittest.cc',
+ 'midi_manager_usb_unittest.cc',
+ 'midi_message_queue_unittest.cc',
+ 'midi_message_util_unittest.cc',
+ 'usb_midi_descriptor_parser_unittest.cc',
+ 'usb_midi_input_stream_unittest.cc',
+ 'usb_midi_output_stream_unittest.cc',
+ ],
+ 'conditions': [
+ ['use_alsa==1 and use_udev==1', {
+ 'defines': [
+ 'USE_ALSA',
+ 'USE_UDEV',
+ ],
+ 'sources': [
+ 'midi_manager_alsa_unittest.cc',
+ ],
+ }],
+ ['use_x11==1', {
+ 'dependencies': [
+ '../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
+ ],
+ }],
+ ['OS=="android"', {
+ 'dependencies': [
+ '../../testing/android/native_test.gyp:native_test_native_code',
+ ],
+ }, {
+ 'sources': [
+ '<@(usb_midi_sources)',
+ ],
+ }],
+ ['OS=="mac"', {
+ 'sources': [
+ 'midi_manager_mac_unittest.cc',
+ ],
+ }],
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="android"', {
+ 'targets': [
+ {
+ # TODO(GN)
+ 'target_name': 'midi_unittests_apk',
+ 'type': 'none',
+ 'dependencies': [
+ 'midi_java',
+ 'midi_unittests',
+ ],
+ 'variables': {
+ 'test_suite_name': 'midi_unittests',
+ },
+ 'includes': ['../../build/apk_test.gypi'],
+ },
+ {
+ # GN: //media/midi:midi_java
+ 'target_name': 'midi_java',
+ 'type': 'none',
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ ],
+ 'variables': {
+ 'java_in_dir': 'java',
+ },
+ 'includes': ['../../build/java.gypi' ],
+ },
+ {
+ # GN: //media/midi:midi_jni_headers
+ 'target_name': 'midi_jni_headers',
+ 'type': 'none',
+ 'sources': [
+ 'java/src/org/chromium/media/midi/UsbMidiDeviceAndroid.java',
+ 'java/src/org/chromium/media/midi/UsbMidiDeviceFactoryAndroid.java',
+ ],
+ 'variables': {
+ 'jni_gen_package': 'media/midi',
+ },
+ 'includes': ['../../build/jni_generator.gypi'],
+ },
+ ],
+ }],
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'midi_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'midi_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'midi_unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/media/midi/midi_export.h b/chromium/media/midi/midi_export.h
new file mode 100644
index 00000000000..18dc239bbbb
--- /dev/null
+++ b/chromium/media/midi/midi_export.h
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_EXPORT_H_
+#define MEDIA_MIDI_MIDI_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(MIDI_IMPLEMENTATION)
+#define MIDI_EXPORT __declspec(dllexport)
+#else
+#define MIDI_EXPORT __declspec(dllimport)
+#endif // defined(MIDI_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(MIDI_IMPLEMENTATION)
+#define MIDI_EXPORT __attribute__((visibility("default")))
+#else
+#define MIDI_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define MIDI_EXPORT
+#endif
+
+#endif // MEDIA_MIDI_MIDI_EXPORT_H_
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index d0eb50f5090..31f819aa0e6 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -5,11 +5,12 @@
#include "media/midi/midi_manager.h"
#include "base/bind.h"
-#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/trace_event/trace_event.h"
namespace media {
+namespace midi {
MidiManager::MidiManager()
: initialized_(false),
@@ -19,8 +20,8 @@ MidiManager::MidiManager()
MidiManager::~MidiManager() {
}
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(USE_ALSA) && \
- !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && \
+ !(defined(USE_ALSA) && defined(USE_UDEV)) && !defined(OS_ANDROID)
MidiManager* MidiManager::Create() {
return new MidiManager;
}
@@ -94,6 +95,15 @@ void MidiManager::EndSession(MidiManagerClient* client) {
pending_clients_.erase(client);
}
+void MidiManager::AccumulateMidiBytesSent(MidiManagerClient* client, size_t n) {
+ {
+ base::AutoLock auto_lock(lock_);
+ if (clients_.find(client) == clients_.end())
+ return;
+ }
+ client->AccumulateMidiBytesSent(n);
+}
+
void MidiManager::DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
@@ -130,6 +140,22 @@ void MidiManager::AddOutputPort(const MidiPortInfo& info) {
client->AddOutputPort(info);
}
+void MidiManager::SetInputPortState(uint32 port_index, MidiPortState state) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_LT(port_index, input_ports_.size());
+ input_ports_[port_index].state = state;
+ for (auto client : clients_)
+ client->SetInputPortState(port_index, state);
+}
+
+void MidiManager::SetOutputPortState(uint32 port_index, MidiPortState state) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_LT(port_index, output_ports_.size());
+ output_ports_[port_index].state = state;
+ for (auto client : clients_)
+ client->SetOutputPortState(port_index, state);
+}
+
void MidiManager::ReceiveMidiData(
uint32 port_index,
const uint8* data,
@@ -169,4 +195,5 @@ void MidiManager::AddInitialPorts(MidiManagerClient* client) {
client->AddOutputPort(info);
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index d7e7e47a1a6..ab874b4d9e4 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -12,7 +12,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
-#include "media/base/media_export.h"
+#include "media/midi/midi_export.h"
#include "media/midi/midi_port_info.h"
#include "media/midi/midi_result.h"
@@ -21,11 +21,12 @@ class SingleThreadTaskRunner;
} // namespace base
namespace media {
+namespace midi {
// A MidiManagerClient registers with the MidiManager to receive MIDI data.
// See MidiManager::RequestAccess() and MidiManager::ReleaseAccess()
// for details.
-class MEDIA_EXPORT MidiManagerClient {
+class MIDI_EXPORT MidiManagerClient {
public:
virtual ~MidiManagerClient() {}
@@ -35,10 +36,10 @@ class MEDIA_EXPORT MidiManagerClient {
virtual void AddInputPort(const MidiPortInfo& info) = 0;
virtual void AddOutputPort(const MidiPortInfo& info) = 0;
- // TODO(toyoshim): DisableInputPort(const MidiPortInfo& info) and
- // DisableOutputPort(const MidiPortInfo& info) should be added.
- // On DisableInputPort(), internal states, e.g. received_messages_queues in
- // MidiHost, should be reset.
+ // SetInputPortState() and SetOutputPortState() are called to notify a known
+ // device gets disconnected, or connected again.
+ virtual void SetInputPortState(uint32 port_index, MidiPortState state) = 0;
+ virtual void SetOutputPortState(uint32 port_index, MidiPortState state) = 0;
// CompleteStartSession() is called when platform dependent preparation is
// finished.
@@ -63,7 +64,7 @@ class MEDIA_EXPORT MidiManagerClient {
};
// Manages access to all MIDI hardware.
-class MEDIA_EXPORT MidiManager {
+class MIDI_EXPORT MidiManager {
public:
static const size_t kMaxPendingClientCount = 128;
@@ -86,6 +87,10 @@ class MEDIA_EXPORT MidiManager {
// A client calls EndSession() to stop receiving MIDI data.
void EndSession(MidiManagerClient* client);
+ // Invoke AccumulateMidiBytesSent() for |client| safely. If the session was
+ // already closed, do nothing.
+ void AccumulateMidiBytesSent(MidiManagerClient* client, size_t n);
+
// DispatchSendMidiData() is called when MIDI data should be sent to the MIDI
// system.
// This method is supposed to return immediately and should not block.
@@ -122,6 +127,8 @@ class MEDIA_EXPORT MidiManager {
void AddInputPort(const MidiPortInfo& info);
void AddOutputPort(const MidiPortInfo& info);
+ void SetInputPortState(uint32 port_index, MidiPortState state);
+ void SetOutputPortState(uint32 port_index, MidiPortState state);
// Dispatches to all clients.
// TODO(toyoshim): Fix the mac implementation to use
@@ -177,6 +184,7 @@ class MEDIA_EXPORT MidiManager {
DISALLOW_COPY_AND_ASSIGN(MidiManager);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MANAGER_H_
diff --git a/chromium/media/midi/midi_manager_alsa.cc b/chromium/media/midi/midi_manager_alsa.cc
index 492573b13de..03c27b76a81 100644
--- a/chromium/media/midi/midi_manager_alsa.cc
+++ b/chromium/media/midi/midi_manager_alsa.cc
@@ -4,23 +4,25 @@
#include "media/midi/midi_manager_alsa.h"
-#include <alsa/asoundlib.h>
+#include <poll.h>
#include <stdlib.h>
#include <algorithm>
#include <string>
#include "base/bind.h"
+#include "base/json/json_string_value_serializer.h"
#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/posix/eintr_wrapper.h"
+#include "base/safe_strerror_posix.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "crypto/sha2.h"
#include "media/midi/midi_port_info.h"
namespace media {
+namespace midi {
namespace {
@@ -30,6 +32,41 @@ namespace {
// realtime messages with respect to sysex.
const size_t kSendBufferSize = 256;
+// Minimum client id for which we will have ALSA card devices for. When we
+// are searching for card devices (used to get the path, id, and manufacturer),
+// we don't want to get confused by kernel clients that do not have a card.
+// See seq_clientmgr.c in the ALSA code for this.
+// TODO(agoode): Add proper client -> card export from the kernel to avoid
+// hardcoding.
+const int kMinimumClientIdForCards = 16;
+
+// ALSA constants.
+const char kAlsaHw[] = "hw";
+
+// udev constants.
+const char kUdev[] = "udev";
+const char kUdevSubsystemSound[] = "sound";
+const char kUdevPropertySoundInitialized[] = "SOUND_INITIALIZED";
+const char kUdevActionChange[] = "change";
+const char kUdevActionRemove[] = "remove";
+
+const char kUdevIdVendor[] = "ID_VENDOR";
+const char kUdevIdVendorEnc[] = "ID_VENDOR_ENC";
+const char kUdevIdVendorFromDatabase[] = "ID_VENDOR_FROM_DATABASE";
+const char kUdevIdVendorId[] = "ID_VENDOR_ID";
+const char kUdevIdModelId[] = "ID_MODEL_ID";
+const char kUdevIdBus[] = "ID_BUS";
+const char kUdevIdPath[] = "ID_PATH";
+const char kUdevIdUsbInterfaceNum[] = "ID_USB_INTERFACE_NUM";
+const char kUdevIdSerialShort[] = "ID_SERIAL_SHORT";
+
+const char kSysattrVendorName[] = "vendor_name";
+const char kSysattrVendor[] = "vendor";
+const char kSysattrModel[] = "model";
+const char kSysattrGuid[] = "guid";
+
+const char kCardSyspath[] = "/card";
+
// Constants for the capabilities we search for in inputs and outputs.
// See http://www.alsa-project.org/alsa-doc/alsa-lib/seq.html.
const unsigned int kRequiredInputPortCaps =
@@ -37,20 +74,73 @@ const unsigned int kRequiredInputPortCaps =
const unsigned int kRequiredOutputPortCaps =
SND_SEQ_PORT_CAP_WRITE | SND_SEQ_PORT_CAP_SUBS_WRITE;
-int AddrToInt(const snd_seq_addr_t* addr) {
- return (addr->client << 8) | addr->port;
+const unsigned int kCreateOutputPortCaps =
+ SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_NO_EXPORT;
+const unsigned int kCreateInputPortCaps =
+ SND_SEQ_PORT_CAP_WRITE | SND_SEQ_PORT_CAP_NO_EXPORT;
+const unsigned int kCreatePortType =
+ SND_SEQ_PORT_TYPE_MIDI_GENERIC | SND_SEQ_PORT_TYPE_APPLICATION;
+
+int AddrToInt(int client, int port) {
+ return (client << 8) | port;
}
-class CardInfo {
- public:
- CardInfo(const std::string name, const std::string manufacturer,
- const std::string driver)
- : name_(name), manufacturer_(manufacturer), driver_(driver) {
+// Returns true if this client has an ALSA card associated with it.
+bool IsCardClient(snd_seq_client_type_t type, int client_id) {
+ return (type == SND_SEQ_KERNEL_CLIENT) &&
+ (client_id >= kMinimumClientIdForCards);
+}
+
+// TODO(agoode): Move this to device/udev_linux.
+const std::string UdevDeviceGetPropertyOrSysattr(
+ struct udev_device* udev_device,
+ const char* property_key,
+ const char* sysattr_key) {
+ // First try the property.
+ std::string value =
+ device::UdevDeviceGetPropertyValue(udev_device, property_key);
+
+ // If no property, look for sysattrs and walk up the parent devices too.
+ while (value.empty() && udev_device) {
+ value = device::UdevDeviceGetSysattrValue(udev_device, sysattr_key);
+ udev_device = device::udev_device_get_parent(udev_device);
}
- const std::string name_;
- const std::string manufacturer_;
- const std::string driver_;
-};
+ return value;
+}
+
+int GetCardNumber(udev_device* dev) {
+ const char* syspath = device::udev_device_get_syspath(dev);
+ if (!syspath)
+ return -1;
+
+ std::string syspath_str(syspath);
+ size_t i = syspath_str.rfind(kCardSyspath);
+ if (i == std::string::npos)
+ return -1;
+
+ int number;
+ if (!base::StringToInt(syspath_str.substr(i + strlen(kCardSyspath)), &number))
+ return -1;
+ return number;
+}
+
+std::string GetVendor(udev_device* dev) {
+ // Try to get the vendor string. Sometimes it is encoded.
+ std::string vendor = device::UdevDecodeString(
+ device::UdevDeviceGetPropertyValue(dev, kUdevIdVendorEnc));
+ // Sometimes it is not encoded.
+ if (vendor.empty())
+ vendor =
+ UdevDeviceGetPropertyOrSysattr(dev, kUdevIdVendor, kSysattrVendorName);
+ return vendor;
+}
+
+void SetStringIfNonEmpty(base::DictionaryValue* value,
+ const std::string& path,
+ const std::string& in_value) {
+ if (!in_value.empty())
+ value->SetString(path, in_value);
+}
} // namespace
@@ -58,8 +148,11 @@ MidiManagerAlsa::MidiManagerAlsa()
: in_client_(NULL),
out_client_(NULL),
out_client_id_(-1),
- in_port_(-1),
+ in_port_id_(-1),
+ alsa_cards_deleter_(&alsa_cards_),
+ alsa_card_midi_count_(0),
decoder_(NULL),
+ udev_(device::udev_new()),
send_thread_("MidiSendThread"),
event_thread_("MidiEventThread"),
event_thread_shutdown_(false) {
@@ -68,17 +161,46 @@ MidiManagerAlsa::MidiManagerAlsa()
snd_midi_event_no_status(decoder_, 1);
}
+MidiManagerAlsa::~MidiManagerAlsa() {
+ // Tell the event thread it will soon be time to shut down. This gives
+ // us assurance the thread will stop in case the SND_SEQ_EVENT_CLIENT_EXIT
+ // message is lost.
+ {
+ base::AutoLock lock(shutdown_lock_);
+ event_thread_shutdown_ = true;
+ }
+
+ // Stop the send thread.
+ send_thread_.Stop();
+
+ // Close the out client. This will trigger the event thread to stop,
+ // because of SND_SEQ_EVENT_CLIENT_EXIT.
+ if (out_client_)
+ snd_seq_close(out_client_);
+
+ // Wait for the event thread to stop.
+ event_thread_.Stop();
+
+ // Close the in client.
+ if (in_client_)
+ snd_seq_close(in_client_);
+
+ // Free the decoder.
+ snd_midi_event_free(decoder_);
+}
+
void MidiManagerAlsa::StartInitialization() {
// TODO(agoode): Move off I/O thread. See http://crbug.com/374341.
// Create client handles.
- int err = snd_seq_open(&in_client_, "hw", SND_SEQ_OPEN_INPUT, 0);
+ int err =
+ snd_seq_open(&in_client_, kAlsaHw, SND_SEQ_OPEN_INPUT, SND_SEQ_NONBLOCK);
if (err != 0) {
VLOG(1) << "snd_seq_open fails: " << snd_strerror(err);
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
}
- int in_client_id = snd_seq_client_id(in_client_);
- err = snd_seq_open(&out_client_, "hw", SND_SEQ_OPEN_OUTPUT, 0);
+ in_client_id_ = snd_seq_client_id(in_client_);
+ err = snd_seq_open(&out_client_, kAlsaHw, SND_SEQ_OPEN_OUTPUT, 0);
if (err != 0) {
VLOG(1) << "snd_seq_open fails: " << snd_strerror(err);
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
@@ -98,13 +220,11 @@ void MidiManagerAlsa::StartInitialization() {
}
// Create input port.
- in_port_ = snd_seq_create_simple_port(in_client_, NULL,
- SND_SEQ_PORT_CAP_WRITE |
- SND_SEQ_PORT_CAP_NO_EXPORT,
- SND_SEQ_PORT_TYPE_MIDI_GENERIC |
- SND_SEQ_PORT_TYPE_APPLICATION);
- if (in_port_ < 0) {
- VLOG(1) << "snd_seq_create_simple_port fails: " << snd_strerror(in_port_);
+ in_port_id_ = snd_seq_create_simple_port(
+ in_client_, NULL, kCreateInputPortCaps, kCreatePortType);
+ if (in_port_id_ < 0) {
+ VLOG(1) << "snd_seq_create_simple_port fails: "
+ << snd_strerror(in_port_id_);
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
}
@@ -115,8 +235,8 @@ void MidiManagerAlsa::StartInitialization() {
snd_seq_addr_t announce_dest;
announce_sender.client = SND_SEQ_CLIENT_SYSTEM;
announce_sender.port = SND_SEQ_PORT_SYSTEM_ANNOUNCE;
- announce_dest.client = in_client_id;
- announce_dest.port = in_port_;
+ announce_dest.client = in_client_id_;
+ announce_dest.port = in_port_id_;
snd_seq_port_subscribe_set_sender(subs, &announce_sender);
snd_seq_port_subscribe_set_dest(subs, &announce_dest);
err = snd_seq_subscribe_port(in_client_, subs);
@@ -126,328 +246,1155 @@ void MidiManagerAlsa::StartInitialization() {
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
}
- // Use a heuristic to extract the list of manufacturers for the hardware MIDI
- // devices. This won't work for all devices. It is also brittle until
- // hotplug is implemented. (See http://crbug.com/279097.)
- // TODO(agoode): Make manufacturer extraction simple and reliable.
- // http://crbug.com/377250.
- ScopedVector<CardInfo> cards;
- snd_ctl_card_info_t* card;
- snd_rawmidi_info_t* midi_out;
- snd_rawmidi_info_t* midi_in;
- snd_ctl_card_info_alloca(&card);
- snd_rawmidi_info_alloca(&midi_out);
- snd_rawmidi_info_alloca(&midi_in);
- for (int index = -1; !snd_card_next(&index) && index >= 0; ) {
- const std::string id = base::StringPrintf("hw:CARD=%i", index);
- snd_ctl_t* handle;
- int err = snd_ctl_open(&handle, id.c_str(), 0);
- if (err != 0) {
- VLOG(1) << "snd_ctl_open fails: " << snd_strerror(err);
- continue;
- }
- err = snd_ctl_card_info(handle, card);
- if (err != 0) {
- VLOG(1) << "snd_ctl_card_info fails: " << snd_strerror(err);
- snd_ctl_close(handle);
- continue;
- }
- // Enumerate any rawmidi devices (not subdevices) and extract CardInfo.
- for (int device = -1;
- !snd_ctl_rawmidi_next_device(handle, &device) && device >= 0; ) {
- bool output;
- bool input;
- snd_rawmidi_info_set_device(midi_out, device);
- snd_rawmidi_info_set_subdevice(midi_out, 0);
- snd_rawmidi_info_set_stream(midi_out, SND_RAWMIDI_STREAM_OUTPUT);
- output = snd_ctl_rawmidi_info(handle, midi_out) == 0;
- snd_rawmidi_info_set_device(midi_in, device);
- snd_rawmidi_info_set_subdevice(midi_in, 0);
- snd_rawmidi_info_set_stream(midi_in, SND_RAWMIDI_STREAM_INPUT);
- input = snd_ctl_rawmidi_info(handle, midi_in) == 0;
- if (!output && !input)
- continue;
-
- snd_rawmidi_info_t* midi = midi_out ? midi_out : midi_in;
- const std::string name = snd_rawmidi_info_get_name(midi);
- // We assume that card longname is in the format of
- // "<manufacturer> <name> at <bus>". Otherwise, we give up to detect
- // a manufacturer name here.
- std::string manufacturer;
- const std::string card_name = snd_ctl_card_info_get_longname(card);
- size_t at_index = card_name.rfind(" at ");
- if (std::string::npos != at_index) {
- size_t name_index = card_name.rfind(name, at_index - 1);
- if (std::string::npos != name_index)
- manufacturer = card_name.substr(0, name_index - 1);
- }
- const std::string driver = snd_ctl_card_info_get_driver(card);
- cards.push_back(new CardInfo(name, manufacturer, driver));
- }
- snd_ctl_close(handle);
+ // Generate hotplug events for existing ports.
+ // TODO(agoode): Check the return value for failure.
+ EnumerateAlsaPorts();
+
+ // Initialize udev monitor.
+ udev_monitor_.reset(
+ device::udev_monitor_new_from_netlink(udev_.get(), kUdev));
+ if (!udev_monitor_.get()) {
+ VLOG(1) << "udev_monitor_new_from_netlink fails";
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+ err = device::udev_monitor_filter_add_match_subsystem_devtype(
+ udev_monitor_.get(), kUdevSubsystemSound, nullptr);
+ if (err != 0) {
+ VLOG(1) << "udev_monitor_add_match_subsystem fails: "
+ << safe_strerror(-err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+ err = device::udev_monitor_enable_receiving(udev_monitor_.get());
+ if (err != 0) {
+ VLOG(1) << "udev_monitor_enable_receiving fails: " << safe_strerror(-err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
}
- // Enumerate all ports in all clients.
- snd_seq_client_info_t* client_info;
- snd_seq_client_info_alloca(&client_info);
- snd_seq_port_info_t* port_info;
- snd_seq_port_info_alloca(&port_info);
+ // Generate hotplug events for existing udev devices.
+ EnumerateUdevCards();
- snd_seq_client_info_set_client(client_info, -1);
- // Enumerate clients.
- uint32 current_input = 0;
- unsigned int current_card = 0;
- while (!snd_seq_query_next_client(in_client_, client_info)) {
- int client_id = snd_seq_client_info_get_client(client_info);
- if ((client_id == in_client_id) || (client_id == out_client_id_)) {
- // Skip our own clients.
- continue;
+ // Start processing events.
+ event_thread_.Start();
+ event_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::ScheduleEventLoop, base::Unretained(this)));
+
+ CompleteInitialization(MIDI_OK);
+}
+
+void MidiManagerAlsa::DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ // Not correct right now. http://crbug.com/374341.
+ if (!send_thread_.IsRunning())
+ send_thread_.Start();
+
+ base::TimeDelta delay;
+ if (timestamp != 0.0) {
+ base::TimeTicks time_to_send =
+ base::TimeTicks() + base::TimeDelta::FromMicroseconds(
+ timestamp * base::Time::kMicrosecondsPerSecond);
+ delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
+ }
+
+ send_thread_.message_loop()->PostDelayedTask(
+ FROM_HERE, base::Bind(&MidiManagerAlsa::SendMidiData,
+ base::Unretained(this), port_index, data),
+ delay);
+
+ // Acknowledge send.
+ send_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiManagerClient::AccumulateMidiBytesSent,
+ base::Unretained(client), data.size()));
+}
+
+MidiManagerAlsa::MidiPort::Id::Id() = default;
+
+MidiManagerAlsa::MidiPort::Id::Id(const std::string& bus,
+ const std::string& vendor_id,
+ const std::string& model_id,
+ const std::string& usb_interface_num,
+ const std::string& serial)
+ : bus_(bus),
+ vendor_id_(vendor_id),
+ model_id_(model_id),
+ usb_interface_num_(usb_interface_num),
+ serial_(serial) {
+}
+
+MidiManagerAlsa::MidiPort::Id::Id(const Id&) = default;
+
+MidiManagerAlsa::MidiPort::Id::~Id() = default;
+
+bool MidiManagerAlsa::MidiPort::Id::operator==(const Id& rhs) const {
+ return (bus_ == rhs.bus_) && (vendor_id_ == rhs.vendor_id_) &&
+ (model_id_ == rhs.model_id_) &&
+ (usb_interface_num_ == rhs.usb_interface_num_) &&
+ (serial_ == rhs.serial_);
+}
+
+bool MidiManagerAlsa::MidiPort::Id::empty() const {
+ return bus_.empty() && vendor_id_.empty() && model_id_.empty() &&
+ usb_interface_num_.empty() && serial_.empty();
+}
+
+MidiManagerAlsa::MidiPort::MidiPort(const std::string& path,
+ const Id& id,
+ int client_id,
+ int port_id,
+ int midi_device,
+ const std::string& client_name,
+ const std::string& port_name,
+ const std::string& manufacturer,
+ const std::string& version,
+ Type type)
+ : id_(id),
+ midi_device_(midi_device),
+ type_(type),
+ path_(path),
+ client_id_(client_id),
+ port_id_(port_id),
+ client_name_(client_name),
+ port_name_(port_name),
+ manufacturer_(manufacturer),
+ version_(version),
+ web_port_index_(0),
+ connected_(true) {
+}
+
+MidiManagerAlsa::MidiPort::~MidiPort() {
+}
+
+// Note: keep synchronized with the MidiPort::Match* methods.
+scoped_ptr<base::Value> MidiManagerAlsa::MidiPort::Value() const {
+ scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue);
+
+ std::string type;
+ switch (type_) {
+ case Type::kInput:
+ type = "input";
+ break;
+ case Type::kOutput:
+ type = "output";
+ break;
+ }
+ value->SetString("type", type);
+ SetStringIfNonEmpty(value.get(), "path", path_);
+ SetStringIfNonEmpty(value.get(), "clientName", client_name_);
+ SetStringIfNonEmpty(value.get(), "portName", port_name_);
+ value->SetInteger("clientId", client_id_);
+ value->SetInteger("portId", port_id_);
+ value->SetInteger("midiDevice", midi_device_);
+
+ // Flatten id fields.
+ SetStringIfNonEmpty(value.get(), "bus", id_.bus());
+ SetStringIfNonEmpty(value.get(), "vendorId", id_.vendor_id());
+ SetStringIfNonEmpty(value.get(), "modelId", id_.model_id());
+ SetStringIfNonEmpty(value.get(), "usbInterfaceNum", id_.usb_interface_num());
+ SetStringIfNonEmpty(value.get(), "serial", id_.serial());
+
+ return value.Pass();
+}
+
+std::string MidiManagerAlsa::MidiPort::JSONValue() const {
+ std::string json;
+ JSONStringValueSerializer serializer(&json);
+ serializer.Serialize(*Value().get());
+ return json;
+}
+
+// TODO(agoode): Do not use SHA256 here. Instead store a persistent
+// mapping and just use a UUID or other random string.
+// http://crbug.com/465320
+std::string MidiManagerAlsa::MidiPort::OpaqueKey() const {
+ uint8 hash[crypto::kSHA256Length];
+ crypto::SHA256HashString(JSONValue(), &hash, sizeof(hash));
+ return base::HexEncode(&hash, sizeof(hash));
+}
+
+bool MidiManagerAlsa::MidiPort::MatchConnected(const MidiPort& query) const {
+ // Matches on:
+ // connected == true
+ // type
+ // path
+ // id
+ // client_id
+ // port_id
+ // midi_device
+ // client_name
+ // port_name
+ return connected() && (type() == query.type()) && (path() == query.path()) &&
+ (id() == query.id()) && (client_id() == query.client_id()) &&
+ (port_id() == query.port_id()) &&
+ (midi_device() == query.midi_device()) &&
+ (client_name() == query.client_name()) &&
+ (port_name() == query.port_name());
+}
+
+bool MidiManagerAlsa::MidiPort::MatchCardPass1(const MidiPort& query) const {
+ // Matches on:
+ // connected == false
+ // type
+ // path
+ // id
+ // port_id
+ // midi_device
+ return MatchCardPass2(query) && (path() == query.path());
+}
+
+bool MidiManagerAlsa::MidiPort::MatchCardPass2(const MidiPort& query) const {
+ // Matches on:
+ // connected == false
+ // type
+ // id
+ // port_id
+ // midi_device
+ return !connected() && (type() == query.type()) && (id() == query.id()) &&
+ (port_id() == query.port_id()) &&
+ (midi_device() == query.midi_device());
+}
+
+bool MidiManagerAlsa::MidiPort::MatchNoCardPass1(const MidiPort& query) const {
+ // Matches on:
+ // connected == false
+ // type
+ // path.empty(), for both this and query
+ // id.empty(), for both this and query
+ // client_id
+ // port_id
+ // client_name
+ // port_name
+ // midi_device == -1, for both this and query
+ return MatchNoCardPass2(query) && (client_id() == query.client_id());
+}
+
+bool MidiManagerAlsa::MidiPort::MatchNoCardPass2(const MidiPort& query) const {
+ // Matches on:
+ // connected == false
+ // type
+ // path.empty(), for both this and query
+ // id.empty(), for both this and query
+ // port_id
+ // client_name
+ // port_name
+ // midi_device == -1, for both this and query
+ return !connected() && (type() == query.type()) && path().empty() &&
+ query.path().empty() && id().empty() && query.id().empty() &&
+ (port_id() == query.port_id()) &&
+ (client_name() == query.client_name()) &&
+ (port_name() == query.port_name()) && (midi_device() == -1) &&
+ (query.midi_device() == -1);
+}
+
+MidiManagerAlsa::MidiPortStateBase::~MidiPortStateBase() {
+}
+
+ScopedVector<MidiManagerAlsa::MidiPort>*
+MidiManagerAlsa::MidiPortStateBase::ports() {
+ return &ports_;
+}
+
+MidiManagerAlsa::MidiPortStateBase::iterator
+MidiManagerAlsa::MidiPortStateBase::Find(
+ const MidiManagerAlsa::MidiPort& port) {
+ auto result = FindConnected(port);
+ if (result == end())
+ result = FindDisconnected(port);
+ return result;
+}
+
+MidiManagerAlsa::MidiPortStateBase::iterator
+MidiManagerAlsa::MidiPortStateBase::FindConnected(
+ const MidiManagerAlsa::MidiPort& port) {
+ // Exact match required for connected ports.
+ auto it = std::find_if(ports_.begin(), ports_.end(), [&port](MidiPort* p) {
+ return p->MatchConnected(port);
+ });
+ return it;
+}
+
+MidiManagerAlsa::MidiPortStateBase::iterator
+MidiManagerAlsa::MidiPortStateBase::FindDisconnected(
+ const MidiManagerAlsa::MidiPort& port) {
+ // Always match on:
+ // type
+ // Possible things to match on:
+ // path
+ // id
+ // client_id
+ // port_id
+ // midi_device
+ // client_name
+ // port_name
+
+ if (!port.path().empty()) {
+ // If path is present, then we have a card-based client.
+
+ // Pass 1. Match on path, id, midi_device, port_id.
+ // This is the best possible match for hardware card-based clients.
+ // This will also match the empty id correctly for devices without an id.
+ auto it = std::find_if(ports_.begin(), ports_.end(), [&port](MidiPort* p) {
+ return p->MatchCardPass1(port);
+ });
+ if (it != ports_.end())
+ return it;
+
+ if (!port.id().empty()) {
+ // Pass 2. Match on id, midi_device, port_id.
+ // This will give us a high-confidence match when a user moves a device to
+ // another USB/Firewire/Thunderbolt/etc port, but only works if the device
+ // has a hardware id.
+ it = std::find_if(ports_.begin(), ports_.end(), [&port](MidiPort* p) {
+ return p->MatchCardPass2(port);
+ });
+ if (it != ports_.end())
+ return it;
}
- const std::string client_name = snd_seq_client_info_get_name(client_info);
- snd_seq_port_info_set_client(port_info, client_id);
- snd_seq_port_info_set_port(port_info, -1);
+ } else {
+ // Else, we have a non-card-based client.
+ // Pass 1. Match on client_id, port_id, client_name, port_name.
+ // This will give us a reasonably good match.
+ auto it = std::find_if(ports_.begin(), ports_.end(), [&port](MidiPort* p) {
+ return p->MatchNoCardPass1(port);
+ });
+ if (it != ports_.end())
+ return it;
+
+ // Pass 2. Match on port_id, client_name, port_name.
+ // This is weaker but similar to pass 2 in the hardware card-based clients
+ // match.
+ it = std::find_if(ports_.begin(), ports_.end(), [&port](MidiPort* p) {
+ return p->MatchNoCardPass2(port);
+ });
+ if (it != ports_.end())
+ return it;
+ }
+
+ // No match.
+ return ports_.end();
+}
+
+MidiManagerAlsa::MidiPortStateBase::MidiPortStateBase() {
+}
+
+void MidiManagerAlsa::TemporaryMidiPortState::Insert(
+ scoped_ptr<MidiPort> port) {
+ ports()->push_back(port.Pass());
+}
+
+MidiManagerAlsa::MidiPortState::MidiPortState()
+ : num_input_ports_(0), num_output_ports_(0) {
+}
+
+uint32 MidiManagerAlsa::MidiPortState::Insert(scoped_ptr<MidiPort> port) {
+ // Add the web midi index.
+ uint32 web_port_index = 0;
+ switch (port->type()) {
+ case MidiPort::Type::kInput:
+ web_port_index = num_input_ports_++;
+ break;
+ case MidiPort::Type::kOutput:
+ web_port_index = num_output_ports_++;
+ break;
+ }
+ port->set_web_port_index(web_port_index);
+ ports()->push_back(port.Pass());
+ return web_port_index;
+}
+
+MidiManagerAlsa::AlsaSeqState::AlsaSeqState()
+ : clients_deleter_(&clients_), card_client_count_(0) {
+}
+
+MidiManagerAlsa::AlsaSeqState::~AlsaSeqState() {
+}
+
+void MidiManagerAlsa::AlsaSeqState::ClientStart(int client_id,
+ const std::string& client_name,
+ snd_seq_client_type_t type) {
+ ClientExit(client_id);
+ clients_[client_id] = new Client(client_name, type);
+ if (IsCardClient(type, client_id))
+ ++card_client_count_;
+}
+
+bool MidiManagerAlsa::AlsaSeqState::ClientStarted(int client_id) {
+ return clients_.find(client_id) != clients_.end();
+}
+
+void MidiManagerAlsa::AlsaSeqState::ClientExit(int client_id) {
+ auto it = clients_.find(client_id);
+ if (it != clients_.end()) {
+ if (IsCardClient(it->second->type(), client_id))
+ --card_client_count_;
+ delete it->second;
+ clients_.erase(it);
+ }
+}
+
+void MidiManagerAlsa::AlsaSeqState::PortStart(
+ int client_id,
+ int port_id,
+ const std::string& port_name,
+ MidiManagerAlsa::AlsaSeqState::PortDirection direction,
+ bool midi) {
+ auto it = clients_.find(client_id);
+ if (it != clients_.end())
+ it->second->AddPort(port_id,
+ scoped_ptr<Port>(new Port(port_name, direction, midi)));
+}
+void MidiManagerAlsa::AlsaSeqState::PortExit(int client_id, int port_id) {
+ auto it = clients_.find(client_id);
+ if (it != clients_.end())
+ it->second->RemovePort(port_id);
+}
+
+snd_seq_client_type_t MidiManagerAlsa::AlsaSeqState::ClientType(
+ int client_id) const {
+ auto it = clients_.find(client_id);
+ if (it == clients_.end())
+ return SND_SEQ_USER_CLIENT;
+ return it->second->type();
+}
+
+scoped_ptr<MidiManagerAlsa::TemporaryMidiPortState>
+MidiManagerAlsa::AlsaSeqState::ToMidiPortState(const AlsaCardMap& alsa_cards) {
+ scoped_ptr<MidiManagerAlsa::TemporaryMidiPortState> midi_ports(
+ new TemporaryMidiPortState);
+ // TODO(agoode): Use more information from udev, to allow hardware matching.
+ // See http://crbug.com/486471.
+ auto card_it = alsa_cards.begin();
+
+ int card_midi_device = -1;
+ for (const auto& client_pair : clients_) {
+ int client_id = client_pair.first;
+ const auto& client = client_pair.second;
+
+ // Get client metadata.
+ const std::string client_name = client->name();
std::string manufacturer;
std::string driver;
- // In the current Alsa kernel implementation, hardware clients match the
- // cards in the same order.
- if ((snd_seq_client_info_get_type(client_info) == SND_SEQ_KERNEL_CLIENT) &&
- (current_card < cards.size())) {
- const CardInfo* info = cards[current_card];
- if (info->name_ == client_name) {
- manufacturer = info->manufacturer_;
- driver = info->driver_;
- current_card++;
+ std::string path;
+ MidiPort::Id id;
+ std::string card_name;
+ std::string card_longname;
+ int midi_device = -1;
+
+ if (IsCardClient(client->type(), client_id)) {
+ auto& card = card_it->second;
+ if (card_midi_device == -1)
+ card_midi_device = 0;
+
+ manufacturer = card->manufacturer();
+ path = card->path();
+ id = MidiPort::Id(card->bus(), card->vendor_id(), card->model_id(),
+ card->usb_interface_num(), card->serial());
+ card_name = card->name();
+ card_longname = card->longname();
+ midi_device = card_midi_device;
+
+ ++card_midi_device;
+ if (card_midi_device >= card->midi_device_count()) {
+ card_midi_device = -1;
+ ++card_it;
}
}
- // Enumerate ports.
- while (!snd_seq_query_next_port(in_client_, port_info)) {
- unsigned int port_type = snd_seq_port_info_get_type(port_info);
- if (port_type & SND_SEQ_PORT_TYPE_MIDI_GENERIC) {
- const snd_seq_addr_t* addr = snd_seq_port_info_get_addr(port_info);
- const std::string name = snd_seq_port_info_get_name(port_info);
- const std::string id = base::StringPrintf("%d:%d %s",
- addr->client,
- addr->port,
- name.c_str());
+
+ for (const auto& port_pair : *client) {
+ int port_id = port_pair.first;
+ const auto& port = port_pair.second;
+
+ if (port->midi()) {
std::string version;
- if (driver != "") {
+ if (!driver.empty()) {
version = driver + " / ";
}
- version += base::StringPrintf("ALSA library version %d.%d.%d",
- SND_LIB_MAJOR,
- SND_LIB_MINOR,
- SND_LIB_SUBMINOR);
- unsigned int caps = snd_seq_port_info_get_capability(port_info);
- if ((caps & kRequiredInputPortCaps) == kRequiredInputPortCaps) {
- // Subscribe to this port.
- const snd_seq_addr_t* sender = snd_seq_port_info_get_addr(port_info);
- snd_seq_addr_t dest;
- dest.client = snd_seq_client_id(in_client_);
- dest.port = in_port_;
- snd_seq_port_subscribe_set_sender(subs, sender);
- snd_seq_port_subscribe_set_dest(subs, &dest);
- err = snd_seq_subscribe_port(in_client_, subs);
- if (err != 0) {
- VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
- } else {
- source_map_[AddrToInt(sender)] = current_input++;
- AddInputPort(MidiPortInfo(id, manufacturer, name, version));
- }
+ version +=
+ base::StringPrintf("ALSA library version %d.%d.%d", SND_LIB_MAJOR,
+ SND_LIB_MINOR, SND_LIB_SUBMINOR);
+ PortDirection direction = port->direction();
+ if (direction == PortDirection::kInput ||
+ direction == PortDirection::kDuplex) {
+ midi_ports->Insert(scoped_ptr<MidiPort>(new MidiPort(
+ path, id, client_id, port_id, midi_device, client->name(),
+ port->name(), manufacturer, version, MidiPort::Type::kInput)));
}
- if ((caps & kRequiredOutputPortCaps) == kRequiredOutputPortCaps) {
- // Create a port for us to send on.
- int out_port =
- snd_seq_create_simple_port(out_client_, NULL,
- SND_SEQ_PORT_CAP_READ |
- SND_SEQ_PORT_CAP_NO_EXPORT,
- SND_SEQ_PORT_TYPE_MIDI_GENERIC |
- SND_SEQ_PORT_TYPE_APPLICATION);
- if (out_port < 0) {
- VLOG(1) << "snd_seq_create_simple_port fails: "
- << snd_strerror(out_port);
- // Skip this output port for now.
- continue;
- }
-
- // Activate port subscription.
- snd_seq_addr_t sender;
- const snd_seq_addr_t* dest = snd_seq_port_info_get_addr(port_info);
- sender.client = snd_seq_client_id(out_client_);
- sender.port = out_port;
- snd_seq_port_subscribe_set_sender(subs, &sender);
- snd_seq_port_subscribe_set_dest(subs, dest);
- err = snd_seq_subscribe_port(out_client_, subs);
- if (err != 0) {
- VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
- snd_seq_delete_simple_port(out_client_, out_port);
- } else {
- snd_midi_event_t* encoder;
- snd_midi_event_new(kSendBufferSize, &encoder);
- encoders_.push_back(encoder);
- out_ports_.push_back(out_port);
- AddOutputPort(MidiPortInfo(id, manufacturer, name, version));
- }
+ if (direction == PortDirection::kOutput ||
+ direction == PortDirection::kDuplex) {
+ midi_ports->Insert(scoped_ptr<MidiPort>(new MidiPort(
+ path, id, client_id, port_id, midi_device, client->name(),
+ port->name(), manufacturer, version, MidiPort::Type::kOutput)));
}
}
}
}
- event_thread_.Start();
- event_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&MidiManagerAlsa::EventReset, base::Unretained(this)));
+ return midi_ports.Pass();
+}
- CompleteInitialization(MIDI_OK);
+MidiManagerAlsa::AlsaSeqState::Port::Port(
+ const std::string& name,
+ MidiManagerAlsa::AlsaSeqState::PortDirection direction,
+ bool midi)
+ : name_(name), direction_(direction), midi_(midi) {
}
-MidiManagerAlsa::~MidiManagerAlsa() {
- // Tell the event thread it will soon be time to shut down. This gives
- // us assurance the thread will stop in case the SND_SEQ_EVENT_CLIENT_EXIT
- // message is lost.
- {
- base::AutoLock lock(shutdown_lock_);
- event_thread_shutdown_ = true;
+MidiManagerAlsa::AlsaSeqState::Port::~Port() {
+}
+
+std::string MidiManagerAlsa::AlsaSeqState::Port::name() const {
+ return name_;
+}
+
+MidiManagerAlsa::AlsaSeqState::PortDirection
+MidiManagerAlsa::AlsaSeqState::Port::direction() const {
+ return direction_;
+}
+
+bool MidiManagerAlsa::AlsaSeqState::Port::midi() const {
+ return midi_;
+}
+
+MidiManagerAlsa::AlsaSeqState::Client::Client(const std::string& name,
+ snd_seq_client_type_t type)
+ : name_(name), type_(type), ports_deleter_(&ports_) {
+}
+
+MidiManagerAlsa::AlsaSeqState::Client::~Client() {
+}
+
+std::string MidiManagerAlsa::AlsaSeqState::Client::name() const {
+ return name_;
+}
+
+snd_seq_client_type_t MidiManagerAlsa::AlsaSeqState::Client::type() const {
+ return type_;
+}
+
+void MidiManagerAlsa::AlsaSeqState::Client::AddPort(int addr,
+ scoped_ptr<Port> port) {
+ RemovePort(addr);
+ ports_[addr] = port.release();
+}
+
+void MidiManagerAlsa::AlsaSeqState::Client::RemovePort(int addr) {
+ auto it = ports_.find(addr);
+ if (it != ports_.end()) {
+ delete it->second;
+ ports_.erase(it);
}
+}
- // Stop the send thread.
- send_thread_.Stop();
+MidiManagerAlsa::AlsaSeqState::Client::PortMap::const_iterator
+MidiManagerAlsa::AlsaSeqState::Client::begin() const {
+ return ports_.begin();
+}
- // Close the out client. This will trigger the event thread to stop,
- // because of SND_SEQ_EVENT_CLIENT_EXIT.
- if (out_client_)
- snd_seq_close(out_client_);
+MidiManagerAlsa::AlsaSeqState::Client::PortMap::const_iterator
+MidiManagerAlsa::AlsaSeqState::Client::end() const {
+ return ports_.end();
+}
- // Wait for the event thread to stop.
- event_thread_.Stop();
+MidiManagerAlsa::AlsaCard::AlsaCard(udev_device* dev,
+ const std::string& name,
+ const std::string& longname,
+ const std::string& driver,
+ int midi_device_count)
+ : name_(name),
+ longname_(longname),
+ driver_(driver),
+ path_(device::UdevDeviceGetPropertyValue(dev, kUdevIdPath)),
+ bus_(device::UdevDeviceGetPropertyValue(dev, kUdevIdBus)),
+ vendor_id_(
+ UdevDeviceGetPropertyOrSysattr(dev, kUdevIdVendorId, kSysattrVendor)),
+ model_id_(
+ UdevDeviceGetPropertyOrSysattr(dev, kUdevIdModelId, kSysattrModel)),
+ usb_interface_num_(
+ device::UdevDeviceGetPropertyValue(dev, kUdevIdUsbInterfaceNum)),
+ serial_(UdevDeviceGetPropertyOrSysattr(dev,
+ kUdevIdSerialShort,
+ kSysattrGuid)),
+ midi_device_count_(midi_device_count),
+ manufacturer_(ExtractManufacturerString(
+ GetVendor(dev),
+ vendor_id_,
+ device::UdevDeviceGetPropertyValue(dev, kUdevIdVendorFromDatabase),
+ name,
+ longname)) {
+}
- // Close the in client.
- if (in_client_)
- snd_seq_close(in_client_);
+MidiManagerAlsa::AlsaCard::~AlsaCard() {
+}
- // Free the decoder.
- snd_midi_event_free(decoder_);
+// static
+std::string MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ const std::string& udev_id_vendor,
+ const std::string& udev_id_vendor_id,
+ const std::string& udev_id_vendor_from_database,
+ const std::string& alsa_name,
+ const std::string& alsa_longname) {
+ // Let's try to determine the manufacturer. Here is the ordered preference
+ // in extraction:
+ // 1. Vendor name from the hardware device string, from udev properties
+ // or sysattrs.
+ // 2. Vendor name from the udev database (property ID_VENDOR_FROM_DATABASE).
+ // 3. Heuristic from ALSA.
+
+ // Is the vendor string present and not just the vendor hex id?
+ if (!udev_id_vendor.empty() && (udev_id_vendor != udev_id_vendor_id)) {
+ return udev_id_vendor;
+ }
+
+ // Is there a vendor string in the hardware database?
+ if (!udev_id_vendor_from_database.empty()) {
+ return udev_id_vendor_from_database;
+ }
- // Free the encoders.
- for (EncoderList::iterator i = encoders_.begin(); i != encoders_.end(); ++i)
- snd_midi_event_free(*i);
+ // Ok, udev gave us nothing useful, or was unavailable. So try a heuristic.
+ // We assume that card longname is in the format of
+ // "<manufacturer> <name> at <bus>". Otherwise, we give up to detect
+ // a manufacturer name here.
+ size_t at_index = alsa_longname.rfind(" at ");
+ if (at_index && at_index != std::string::npos) {
+ size_t name_index = alsa_longname.rfind(alsa_name, at_index - 1);
+ if (name_index && name_index != std::string::npos)
+ return alsa_longname.substr(0, name_index - 1);
+ }
+
+ // Failure.
+ return "";
}
void MidiManagerAlsa::SendMidiData(uint32 port_index,
const std::vector<uint8>& data) {
DCHECK(send_thread_.message_loop_proxy()->BelongsToCurrentThread());
- snd_midi_event_t* encoder = encoders_[port_index];
+ snd_midi_event_t* encoder;
+ snd_midi_event_new(kSendBufferSize, &encoder);
for (unsigned int i = 0; i < data.size(); i++) {
snd_seq_event_t event;
int result = snd_midi_event_encode_byte(encoder, data[i], &event);
if (result == 1) {
// Full event, send it.
- snd_seq_ev_set_source(&event, out_ports_[port_index]);
- snd_seq_ev_set_subs(&event);
- snd_seq_ev_set_direct(&event);
- snd_seq_event_output_direct(out_client_, &event);
+ base::AutoLock lock(out_ports_lock_);
+ auto it = out_ports_.find(port_index);
+ if (it != out_ports_.end()) {
+ snd_seq_ev_set_source(&event, it->second);
+ snd_seq_ev_set_subs(&event);
+ snd_seq_ev_set_direct(&event);
+ snd_seq_event_output_direct(out_client_, &event);
+ }
}
}
+ snd_midi_event_free(encoder);
}
-void MidiManagerAlsa::DispatchSendMidiData(MidiManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp) {
- if (out_ports_.size() <= port_index)
+void MidiManagerAlsa::ScheduleEventLoop() {
+ event_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::EventLoop, base::Unretained(this)));
+}
+
+void MidiManagerAlsa::EventLoop() {
+ bool loop_again = true;
+
+ struct pollfd pfd[2];
+ snd_seq_poll_descriptors(in_client_, &pfd[0], 1, POLLIN);
+ pfd[1].fd = device::udev_monitor_get_fd(udev_monitor_.get());
+ pfd[1].events = POLLIN;
+
+ int err = HANDLE_EINTR(poll(pfd, arraysize(pfd), -1));
+ if (err < 0) {
+ VLOG(1) << "poll fails: " << safe_strerror(errno);
+ loop_again = false;
+ } else {
+ if (pfd[0].revents & POLLIN) {
+ // Read available incoming MIDI data.
+ int remaining;
+ double timestamp =
+ (base::TimeTicks::Now() - base::TimeTicks()).InSecondsF();
+ do {
+ snd_seq_event_t* event;
+ err = snd_seq_event_input(in_client_, &event);
+ remaining = snd_seq_event_input_pending(in_client_, 0);
+
+ if (err == -ENOSPC) {
+ // Handle out of space error.
+ VLOG(1) << "snd_seq_event_input detected buffer overrun";
+ // We've lost events: check another way to see if we need to shut
+ // down.
+ base::AutoLock lock(shutdown_lock_);
+ if (event_thread_shutdown_)
+ loop_again = false;
+ } else if (err == -EAGAIN) {
+ // We've read all the data.
+ } else if (err < 0) {
+ // Handle other errors.
+ VLOG(1) << "snd_seq_event_input fails: " << snd_strerror(err);
+ // TODO(agoode): Use RecordAction() or similar to log this.
+ loop_again = false;
+ } else if (event->source.client == SND_SEQ_CLIENT_SYSTEM &&
+ event->source.port == SND_SEQ_PORT_SYSTEM_ANNOUNCE) {
+ // Handle announce events.
+ switch (event->type) {
+ case SND_SEQ_EVENT_PORT_START:
+ // Don't use SND_SEQ_EVENT_CLIENT_START because the
+ // client name may not be set by the time we query
+ // it. It should be set by the time ports are made.
+ ProcessClientStartEvent(event->data.addr.client);
+ ProcessPortStartEvent(event->data.addr);
+ break;
+ case SND_SEQ_EVENT_CLIENT_EXIT:
+ // Check for disconnection of our "out" client. This means "shut
+ // down".
+ if (event->data.addr.client == out_client_id_) {
+ loop_again = false;
+ remaining = 0;
+ } else
+ ProcessClientExitEvent(event->data.addr);
+ break;
+ case SND_SEQ_EVENT_PORT_EXIT:
+ ProcessPortExitEvent(event->data.addr);
+ break;
+ }
+ } else {
+ // Normal operation.
+ ProcessSingleEvent(event, timestamp);
+ }
+ } while (remaining > 0);
+ }
+ if (pfd[1].revents & POLLIN) {
+ device::ScopedUdevDevicePtr dev(
+ device::udev_monitor_receive_device(udev_monitor_.get()));
+ if (dev.get())
+ ProcessUdevEvent(dev.get());
+ else
+ VLOG(1) << "udev_monitor_receive_device fails";
+ }
+ }
+
+ // Do again.
+ if (loop_again)
+ ScheduleEventLoop();
+}
+
+void MidiManagerAlsa::ProcessSingleEvent(snd_seq_event_t* event,
+ double timestamp) {
+ auto source_it =
+ source_map_.find(AddrToInt(event->source.client, event->source.port));
+ if (source_it != source_map_.end()) {
+ uint32 source = source_it->second;
+ if (event->type == SND_SEQ_EVENT_SYSEX) {
+ // Special! Variable-length sysex.
+ ReceiveMidiData(source, static_cast<const uint8*>(event->data.ext.ptr),
+ event->data.ext.len, timestamp);
+ } else {
+ // Otherwise, decode this and send that on.
+ unsigned char buf[12];
+ long count = snd_midi_event_decode(decoder_, buf, sizeof(buf), event);
+ if (count <= 0) {
+ if (count != -ENOENT) {
+ // ENOENT means that it's not a MIDI message, which is not an
+ // error, but other negative values are errors for us.
+ VLOG(1) << "snd_midi_event_decoder fails " << snd_strerror(count);
+ // TODO(agoode): Record this failure.
+ }
+ } else {
+ ReceiveMidiData(source, buf, count, timestamp);
+ }
+ }
+ }
+}
+
+void MidiManagerAlsa::ProcessClientStartEvent(int client_id) {
+ // Ignore if client is already started.
+ if (alsa_seq_state_.ClientStarted(client_id))
return;
- // Not correct right now. http://crbug.com/374341.
- if (!send_thread_.IsRunning())
- send_thread_.Start();
+ snd_seq_client_info_t* client_info;
+ snd_seq_client_info_alloca(&client_info);
+ int err = snd_seq_get_any_client_info(in_client_, client_id, client_info);
+ if (err != 0)
+ return;
- base::TimeDelta delay;
- if (timestamp != 0.0) {
- base::TimeTicks time_to_send =
- base::TimeTicks() + base::TimeDelta::FromMicroseconds(
- timestamp * base::Time::kMicrosecondsPerSecond);
- delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
+ // Skip our own clients.
+ if ((client_id == in_client_id_) || (client_id == out_client_id_))
+ return;
+
+ // Update our view of ALSA seq state.
+ alsa_seq_state_.ClientStart(client_id,
+ snd_seq_client_info_get_name(client_info),
+ snd_seq_client_info_get_type(client_info));
+
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
+}
+
+void MidiManagerAlsa::ProcessPortStartEvent(const snd_seq_addr_t& addr) {
+ snd_seq_port_info_t* port_info;
+ snd_seq_port_info_alloca(&port_info);
+ int err =
+ snd_seq_get_any_port_info(in_client_, addr.client, addr.port, port_info);
+ if (err != 0)
+ return;
+
+ unsigned int caps = snd_seq_port_info_get_capability(port_info);
+ bool input = (caps & kRequiredInputPortCaps) == kRequiredInputPortCaps;
+ bool output = (caps & kRequiredOutputPortCaps) == kRequiredOutputPortCaps;
+ AlsaSeqState::PortDirection direction;
+ if (input && output)
+ direction = AlsaSeqState::PortDirection::kDuplex;
+ else if (input)
+ direction = AlsaSeqState::PortDirection::kInput;
+ else if (output)
+ direction = AlsaSeqState::PortDirection::kOutput;
+ else
+ return;
+
+ // Update our view of ALSA seq state.
+ alsa_seq_state_.PortStart(
+ addr.client, addr.port, snd_seq_port_info_get_name(port_info), direction,
+ snd_seq_port_info_get_type(port_info) & SND_SEQ_PORT_TYPE_MIDI_GENERIC);
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
+}
+
+void MidiManagerAlsa::ProcessClientExitEvent(const snd_seq_addr_t& addr) {
+ // Update our view of ALSA seq state.
+ alsa_seq_state_.ClientExit(addr.client);
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
+}
+
+void MidiManagerAlsa::ProcessPortExitEvent(const snd_seq_addr_t& addr) {
+ // Update our view of ALSA seq state.
+ alsa_seq_state_.PortExit(addr.client, addr.port);
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
+}
+
+void MidiManagerAlsa::ProcessUdevEvent(udev_device* dev) {
+ // Only card devices have this property set, and only when they are
+ // fully initialized.
+ if (!device::udev_device_get_property_value(dev,
+ kUdevPropertySoundInitialized))
+ return;
+
+ // Get the action. If no action, then we are doing first time enumeration
+ // and the device is treated as new.
+ const char* action = device::udev_device_get_action(dev);
+ if (!action)
+ action = kUdevActionChange;
+
+ if (strcmp(action, kUdevActionChange) == 0) {
+ AddCard(dev);
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
+ } else if (strcmp(action, kUdevActionRemove) == 0) {
+ RemoveCard(GetCardNumber(dev));
+ // Generate Web MIDI events.
+ UpdatePortStateAndGenerateEvents();
}
+}
- send_thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&MidiManagerAlsa::SendMidiData, base::Unretained(this),
- port_index, data), delay);
+void MidiManagerAlsa::AddCard(udev_device* dev) {
+ int number = GetCardNumber(dev);
+ if (number == -1)
+ return;
- // Acknowledge send.
- send_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&MidiManagerClient::AccumulateMidiBytesSent,
- base::Unretained(client), data.size()));
+ RemoveCard(number);
+
+ snd_ctl_card_info_t* card;
+ snd_hwdep_info_t* hwdep;
+ snd_ctl_card_info_alloca(&card);
+ snd_hwdep_info_alloca(&hwdep);
+ const std::string id = base::StringPrintf("hw:CARD=%i", number);
+ snd_ctl_t* handle;
+ int err = snd_ctl_open(&handle, id.c_str(), 0);
+ if (err != 0) {
+ VLOG(1) << "snd_ctl_open fails: " << snd_strerror(err);
+ return;
+ }
+ err = snd_ctl_card_info(handle, card);
+ if (err != 0) {
+ VLOG(1) << "snd_ctl_card_info fails: " << snd_strerror(err);
+ snd_ctl_close(handle);
+ return;
+ }
+ std::string name = snd_ctl_card_info_get_name(card);
+ std::string longname = snd_ctl_card_info_get_longname(card);
+ std::string driver = snd_ctl_card_info_get_driver(card);
+
+ // Count rawmidi devices (not subdevices).
+ int midi_count = 0;
+ for (int device = -1;
+ !snd_ctl_rawmidi_next_device(handle, &device) && device >= 0;)
+ ++midi_count;
+
+ // Count any hwdep synths that become MIDI devices outside of rawmidi.
+ //
+ // Explanation:
+ // Any kernel driver can create an ALSA client (visible to us).
+ // With modern hardware, only rawmidi devices do this. Kernel
+ // drivers create rawmidi devices and the rawmidi subsystem makes
+ // the seq clients. But the OPL3 driver is special, it does not
+ // make a rawmidi device but a seq client directly. (This is the
+ // only one to worry about in the kernel code, as of 2015-03-23.)
+ //
+ // OPL3 is very old (but still possible to get in new
+ // hardware). It is unlikely that new drivers would not use
+ // rawmidi and defeat our heuristic.
+ //
+ // Longer term, support should be added in the kernel to expose a
+ // direct link from card->client (or client->card) so that all
+ // these heuristics will be obsolete. Once that is there, we can
+ // assume our old heuristics will work on old kernels and the new
+ // robust code will be used on new. Then we will not need to worry
+ // about changes to kernel internals breaking our code.
+ // See the TODO above at kMinimumClientIdForCards.
+ for (int device = -1;
+ !snd_ctl_hwdep_next_device(handle, &device) && device >= 0;) {
+ err = snd_ctl_hwdep_info(handle, hwdep);
+ if (err != 0) {
+ VLOG(1) << "snd_ctl_hwdep_info fails: " << snd_strerror(err);
+ continue;
+ }
+ snd_hwdep_iface_t iface = snd_hwdep_info_get_iface(hwdep);
+ if (iface == SND_HWDEP_IFACE_OPL2 || iface == SND_HWDEP_IFACE_OPL3 ||
+ iface == SND_HWDEP_IFACE_OPL4)
+ ++midi_count;
+ }
+ snd_ctl_close(handle);
+
+ if (midi_count > 0)
+ alsa_cards_[number] = new AlsaCard(dev, name, longname, driver, midi_count);
+ alsa_card_midi_count_ += midi_count;
}
-void MidiManagerAlsa::EventReset() {
- event_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&MidiManagerAlsa::EventLoop, base::Unretained(this)));
+void MidiManagerAlsa::RemoveCard(int number) {
+ auto it = alsa_cards_.find(number);
+ if (it == alsa_cards_.end())
+ return;
+
+ alsa_card_midi_count_ -= it->second->midi_device_count();
+ delete it->second;
+ alsa_cards_.erase(it);
}
-void MidiManagerAlsa::EventLoop() {
- // Read available incoming MIDI data.
- snd_seq_event_t* event;
- int err = snd_seq_event_input(in_client_, &event);
- double timestamp =
- (base::TimeTicks::HighResNow() - base::TimeTicks()).InSecondsF();
- if (err == -ENOSPC) {
- VLOG(1) << "snd_seq_event_input detected buffer overrun";
-
- // We've lost events: check another way to see if we need to shut down.
- base::AutoLock lock(shutdown_lock_);
- if (event_thread_shutdown_) {
- return;
+void MidiManagerAlsa::UpdatePortStateAndGenerateEvents() {
+ // Verify that our information from ALSA and udev are in sync. If
+ // not, we cannot generate events right now.
+ if (alsa_card_midi_count_ != alsa_seq_state_.card_client_count())
+ return;
+
+ // Generate new port state.
+ auto new_port_state = alsa_seq_state_.ToMidiPortState(alsa_cards_);
+
+ // Disconnect any connected old ports that are now missing.
+ for (auto* old_port : port_state_) {
+ if (old_port->connected() &&
+ (new_port_state->FindConnected(*old_port) == new_port_state->end())) {
+ old_port->set_connected(false);
+ uint32 web_port_index = old_port->web_port_index();
+ switch (old_port->type()) {
+ case MidiPort::Type::kInput:
+ source_map_.erase(
+ AddrToInt(old_port->client_id(), old_port->port_id()));
+ SetInputPortState(web_port_index, MIDI_PORT_DISCONNECTED);
+ break;
+ case MidiPort::Type::kOutput:
+ DeleteAlsaOutputPort(web_port_index);
+ SetOutputPortState(web_port_index, MIDI_PORT_DISCONNECTED);
+ break;
}
- } else if (err < 0) {
- VLOG(1) << "snd_seq_event_input fails: " << snd_strerror(err);
- return;
- } else {
- // Check for disconnection of out client. This means "shut down".
- if (event->source.client == SND_SEQ_CLIENT_SYSTEM &&
- event->source.port == SND_SEQ_PORT_SYSTEM_ANNOUNCE &&
- event->type == SND_SEQ_EVENT_CLIENT_EXIT &&
- event->data.addr.client == out_client_id_) {
- return;
}
+ }
- std::map<int, uint32>::iterator source_it =
- source_map_.find(AddrToInt(&event->source));
- if (source_it != source_map_.end()) {
- uint32 source = source_it->second;
- if (event->type == SND_SEQ_EVENT_SYSEX) {
- // Special! Variable-length sysex.
- ReceiveMidiData(source, static_cast<const uint8*>(event->data.ext.ptr),
- event->data.ext.len,
- timestamp);
- } else {
- // Otherwise, decode this and send that on.
- unsigned char buf[12];
- long count = snd_midi_event_decode(decoder_, buf, sizeof(buf), event);
- if (count <= 0) {
- if (count != -ENOENT) {
- // ENOENT means that it's not a MIDI message, which is not an
- // error, but other negative values are errors for us.
- VLOG(1) << "snd_midi_event_decoder fails " << snd_strerror(count);
- }
- } else {
- ReceiveMidiData(source, buf, count, timestamp);
- }
+ // Reconnect or add new ports.
+ auto it = new_port_state->begin();
+ while (it != new_port_state->end()) {
+ auto* new_port = *it;
+ auto old_port = port_state_.Find(*new_port);
+ if (old_port == port_state_.end()) {
+ // Add new port.
+ uint32 web_port_index =
+ port_state_.Insert(scoped_ptr<MidiPort>(new_port));
+ MidiPortInfo info(new_port->OpaqueKey(), new_port->manufacturer(),
+ new_port->port_name(), new_port->version(),
+ MIDI_PORT_OPENED);
+ switch (new_port->type()) {
+ case MidiPort::Type::kInput:
+ if (Subscribe(web_port_index, new_port->client_id(),
+ new_port->port_id()))
+ AddInputPort(info);
+ break;
+ case MidiPort::Type::kOutput:
+ if (CreateAlsaOutputPort(web_port_index, new_port->client_id(),
+ new_port->port_id()))
+ AddOutputPort(info);
+ break;
+ }
+ it = new_port_state->weak_erase(it);
+ } else if (!(*old_port)->connected()) {
+ // Reconnect.
+ uint32 web_port_index = (*old_port)->web_port_index();
+ (*old_port)->Update(new_port->path(), new_port->client_id(),
+ new_port->port_id(), new_port->client_name(),
+ new_port->port_name(), new_port->manufacturer(),
+ new_port->version());
+ switch ((*old_port)->type()) {
+ case MidiPort::Type::kInput:
+ if (Subscribe(web_port_index, (*old_port)->client_id(),
+ (*old_port)->port_id()))
+ SetInputPortState(web_port_index, MIDI_PORT_OPENED);
+ break;
+ case MidiPort::Type::kOutput:
+ if (CreateAlsaOutputPort(web_port_index, (*old_port)->client_id(),
+ (*old_port)->port_id()))
+ SetOutputPortState(web_port_index, MIDI_PORT_OPENED);
+ break;
}
+ (*old_port)->set_connected(true);
+ ++it;
+ } else {
+ ++it;
+ }
+ }
+}
+
+// TODO(agoode): return false on failure.
+void MidiManagerAlsa::EnumerateAlsaPorts() {
+ snd_seq_client_info_t* client_info;
+ snd_seq_client_info_alloca(&client_info);
+ snd_seq_port_info_t* port_info;
+ snd_seq_port_info_alloca(&port_info);
+
+ // Enumerate clients.
+ snd_seq_client_info_set_client(client_info, -1);
+ while (!snd_seq_query_next_client(in_client_, client_info)) {
+ int client_id = snd_seq_client_info_get_client(client_info);
+ ProcessClientStartEvent(client_id);
+
+ // Enumerate ports.
+ snd_seq_port_info_set_client(port_info, client_id);
+ snd_seq_port_info_set_port(port_info, -1);
+ while (!snd_seq_query_next_port(in_client_, port_info)) {
+ const snd_seq_addr_t* addr = snd_seq_port_info_get_addr(port_info);
+ ProcessPortStartEvent(*addr);
}
}
+}
- // Do again.
- event_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&MidiManagerAlsa::EventLoop, base::Unretained(this)));
+bool MidiManagerAlsa::EnumerateUdevCards() {
+ int err;
+
+ device::ScopedUdevEnumeratePtr enumerate(
+ device::udev_enumerate_new(udev_.get()));
+ if (!enumerate.get()) {
+ VLOG(1) << "udev_enumerate_new fails";
+ return false;
+ }
+
+ err = device::udev_enumerate_add_match_subsystem(enumerate.get(),
+ kUdevSubsystemSound);
+ if (err) {
+ VLOG(1) << "udev_enumerate_add_match_subsystem fails: "
+ << safe_strerror(-err);
+ return false;
+ }
+
+ err = device::udev_enumerate_scan_devices(enumerate.get());
+ if (err) {
+ VLOG(1) << "udev_enumerate_scan_devices fails: " << safe_strerror(-err);
+ return false;
+ }
+
+ udev_list_entry* list_entry;
+ auto* devices = device::udev_enumerate_get_list_entry(enumerate.get());
+ udev_list_entry_foreach(list_entry, devices) {
+ const char* path = device::udev_list_entry_get_name(list_entry);
+ device::ScopedUdevDevicePtr dev(
+ device::udev_device_new_from_syspath(udev_.get(), path));
+ if (dev.get())
+ ProcessUdevEvent(dev.get());
+ }
+
+ return true;
+}
+
+bool MidiManagerAlsa::CreateAlsaOutputPort(uint32 port_index,
+ int client_id,
+ int port_id) {
+ // Create the port.
+ int out_port = snd_seq_create_simple_port(
+ out_client_, NULL, kCreateOutputPortCaps, kCreatePortType);
+ if (out_port < 0) {
+ VLOG(1) << "snd_seq_create_simple_port fails: " << snd_strerror(out_port);
+ return false;
+ }
+ // Activate port subscription.
+ snd_seq_port_subscribe_t* subs;
+ snd_seq_port_subscribe_alloca(&subs);
+ snd_seq_addr_t sender;
+ sender.client = out_client_id_;
+ sender.port = out_port;
+ snd_seq_port_subscribe_set_sender(subs, &sender);
+ snd_seq_addr_t dest;
+ dest.client = client_id;
+ dest.port = port_id;
+ snd_seq_port_subscribe_set_dest(subs, &dest);
+ int err = snd_seq_subscribe_port(out_client_, subs);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
+ snd_seq_delete_simple_port(out_client_, out_port);
+ return false;
+ }
+
+ // Update our map.
+ base::AutoLock lock(out_ports_lock_);
+ out_ports_[port_index] = out_port;
+ return true;
+}
+
+void MidiManagerAlsa::DeleteAlsaOutputPort(uint32 port_index) {
+ base::AutoLock lock(out_ports_lock_);
+ auto it = out_ports_.find(port_index);
+ if (it == out_ports_.end())
+ return;
+
+ int alsa_port = it->second;
+ snd_seq_delete_simple_port(out_client_, alsa_port);
+ out_ports_.erase(it);
+}
+
+bool MidiManagerAlsa::Subscribe(uint32 port_index, int client_id, int port_id) {
+ // Activate port subscription.
+ snd_seq_port_subscribe_t* subs;
+ snd_seq_port_subscribe_alloca(&subs);
+ snd_seq_addr_t sender;
+ sender.client = client_id;
+ sender.port = port_id;
+ snd_seq_port_subscribe_set_sender(subs, &sender);
+ snd_seq_addr_t dest;
+ dest.client = in_client_id_;
+ dest.port = in_port_id_;
+ snd_seq_port_subscribe_set_dest(subs, &dest);
+ int err = snd_seq_subscribe_port(in_client_, subs);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
+ return false;
+ }
+
+ // Update our map.
+ source_map_[AddrToInt(client_id, port_id)] = port_index;
+ return true;
}
MidiManager* MidiManager::Create() {
return new MidiManagerAlsa();
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_alsa.h b/chromium/media/midi/midi_manager_alsa.h
index cbe03c5fee7..f2a71ab1be9 100644
--- a/chromium/media/midi/midi_manager_alsa.h
+++ b/chromium/media/midi/midi_manager_alsa.h
@@ -10,14 +10,21 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/stl_util.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
+#include "base/values.h"
+#include "device/udev_linux/scoped_udev.h"
+#include "media/midi/midi_export.h"
#include "media/midi/midi_manager.h"
namespace media {
+namespace midi {
-class MidiManagerAlsa : public MidiManager {
+class MIDI_EXPORT MidiManagerAlsa final : public MidiManager {
public:
MidiManagerAlsa();
~MidiManagerAlsa() override;
@@ -30,40 +37,395 @@ class MidiManagerAlsa : public MidiManager {
double timestamp) override;
private:
+ friend class MidiManagerAlsaTest;
+ FRIEND_TEST_ALL_PREFIXES(MidiManagerAlsaTest, ExtractManufacturer);
+ FRIEND_TEST_ALL_PREFIXES(MidiManagerAlsaTest, ToMidiPortState);
+
+ class AlsaCard;
+ typedef std::map<int, AlsaCard*> AlsaCardMap;
+
+ class MidiPort {
+ public:
+ enum class Type { kInput, kOutput };
+
+ // The Id class is used to keep the multiple strings separate
+ // but compare them all together for equality purposes.
+ // The individual strings that make up the Id can theoretically contain
+ // arbitrary characters, so unfortunately there is no simple way to
+ // concatenate them into a single string.
+ class Id final {
+ public:
+ Id();
+ Id(const std::string& bus,
+ const std::string& vendor_id,
+ const std::string& model_id,
+ const std::string& usb_interface_num,
+ const std::string& serial);
+ Id(const Id&);
+ ~Id();
+ bool operator==(const Id&) const;
+ bool empty() const;
+
+ std::string bus() const { return bus_; }
+ std::string vendor_id() const { return vendor_id_; }
+ std::string model_id() const { return model_id_; }
+ std::string usb_interface_num() const { return usb_interface_num_; }
+ std::string serial() const { return serial_; }
+
+ private:
+ std::string bus_;
+ std::string vendor_id_;
+ std::string model_id_;
+ std::string usb_interface_num_;
+ std::string serial_;
+ };
+
+ MidiPort(const std::string& path,
+ const Id& id,
+ int client_id,
+ int port_id,
+ int midi_device,
+ const std::string& client_name,
+ const std::string& port_name,
+ const std::string& manufacturer,
+ const std::string& version,
+ Type type);
+ ~MidiPort();
+
+ // Gets a Value representation of this object, suitable for serialization.
+ scoped_ptr<base::Value> Value() const;
+
+ // Gets a string version of Value in JSON format.
+ std::string JSONValue() const;
+
+ // Gets an opaque identifier for this object, suitable for using as the id
+ // field in MidiPort.id on the web. Note that this string does not store
+ // the full state.
+ std::string OpaqueKey() const;
+
+ // Checks for equality for connected ports.
+ bool MatchConnected(const MidiPort& query) const;
+ // Checks for equality for kernel cards with id, pass 1.
+ bool MatchCardPass1(const MidiPort& query) const;
+ // Checks for equality for kernel cards with id, pass 2.
+ bool MatchCardPass2(const MidiPort& query) const;
+ // Checks for equality for non-card clients, pass 1.
+ bool MatchNoCardPass1(const MidiPort& query) const;
+ // Checks for equality for non-card clients, pass 2.
+ bool MatchNoCardPass2(const MidiPort& query) const;
+
+ // accessors
+ std::string path() const { return path_; }
+ Id id() const { return id_; }
+ std::string client_name() const { return client_name_; }
+ std::string port_name() const { return port_name_; }
+ std::string manufacturer() const { return manufacturer_; }
+ std::string version() const { return version_; }
+ int client_id() const { return client_id_; }
+ int port_id() const { return port_id_; }
+ int midi_device() const { return midi_device_; }
+ Type type() const { return type_; }
+ uint32 web_port_index() const { return web_port_index_; }
+ bool connected() const { return connected_; }
+
+ // mutators
+ void set_web_port_index(uint32 web_port_index) {
+ web_port_index_ = web_port_index;
+ }
+ void set_connected(bool connected) { connected_ = connected; }
+ void Update(const std::string& path,
+ int client_id,
+ int port_id,
+ const std::string& client_name,
+ const std::string& port_name,
+ const std::string& manufacturer,
+ const std::string& version) {
+ path_ = path;
+ client_id_ = client_id;
+ port_id_ = port_id;
+ client_name_ = client_name;
+ port_name_ = port_name;
+ manufacturer_ = manufacturer;
+ version_ = version;
+ }
+
+ private:
+ // Immutable properties.
+ const Id id_;
+ const int midi_device_;
+
+ const Type type_;
+
+ // Mutable properties. These will get updated as ports move around or
+ // drivers change.
+ std::string path_;
+ int client_id_;
+ int port_id_;
+ std::string client_name_;
+ std::string port_name_;
+ std::string manufacturer_;
+ std::string version_;
+
+ // Index for MidiManager.
+ uint32 web_port_index_;
+
+ // Port is present in the ALSA system.
+ bool connected_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiPort);
+ };
+
+ class MidiPortStateBase {
+ public:
+ typedef ScopedVector<MidiPort>::iterator iterator;
+
+ virtual ~MidiPortStateBase();
+
+ // Given a port, finds a port in the internal store.
+ iterator Find(const MidiPort& port);
+
+ // Given a port, finds a connected port, using exact matching.
+ iterator FindConnected(const MidiPort& port);
+
+ // Given a port, finds a disconnected port, using heuristic matching.
+ iterator FindDisconnected(const MidiPort& port);
+
+ iterator begin() { return ports_.begin(); }
+ iterator end() { return ports_.end(); }
+
+ protected:
+ MidiPortStateBase();
+
+ ScopedVector<MidiPort>* ports();
+
+ private:
+ ScopedVector<MidiPort> ports_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiPortStateBase);
+ };
+
+ class TemporaryMidiPortState final : public MidiPortStateBase {
+ public:
+ // Removes a port from the list without deleting it.
+ iterator weak_erase(iterator position) {
+ return ports()->weak_erase(position);
+ }
+
+ void Insert(scoped_ptr<MidiPort> port);
+ };
+
+ class MidiPortState final : public MidiPortStateBase {
+ public:
+ MidiPortState();
+
+ // Inserts a port. Returns web_port_index.
+ uint32 Insert(scoped_ptr<MidiPort> port);
+
+ private:
+ uint32 num_input_ports_;
+ uint32 num_output_ports_;
+ };
+
+ class AlsaSeqState {
+ public:
+ enum class PortDirection { kInput, kOutput, kDuplex };
+
+ AlsaSeqState();
+ ~AlsaSeqState();
+
+ void ClientStart(int client_id,
+ const std::string& client_name,
+ snd_seq_client_type_t type);
+ bool ClientStarted(int client_id);
+ void ClientExit(int client_id);
+ void PortStart(int client_id,
+ int port_id,
+ const std::string& port_name,
+ PortDirection direction,
+ bool midi);
+ void PortExit(int client_id, int port_id);
+ snd_seq_client_type_t ClientType(int client_id) const;
+ scoped_ptr<TemporaryMidiPortState> ToMidiPortState(
+ const AlsaCardMap& alsa_cards);
+
+ int card_client_count() { return card_client_count_; }
+
+ private:
+ class Port {
+ public:
+ Port(const std::string& name, PortDirection direction, bool midi);
+ ~Port();
+
+ std::string name() const;
+ PortDirection direction() const;
+ // True if this port is a MIDI port, instead of another kind of ALSA port.
+ bool midi() const;
+
+ private:
+ const std::string name_;
+ const PortDirection direction_;
+ const bool midi_;
+
+ DISALLOW_COPY_AND_ASSIGN(Port);
+ };
+
+ class Client {
+ public:
+ typedef std::map<int, Port*> PortMap;
+
+ Client(const std::string& name, snd_seq_client_type_t type);
+ ~Client();
+
+ std::string name() const;
+ snd_seq_client_type_t type() const;
+ void AddPort(int addr, scoped_ptr<Port> port);
+ void RemovePort(int addr);
+ PortMap::const_iterator begin() const;
+ PortMap::const_iterator end() const;
+
+ private:
+ const std::string name_;
+ const snd_seq_client_type_t type_;
+ PortMap ports_;
+ STLValueDeleter<PortMap> ports_deleter_;
+
+ DISALLOW_COPY_AND_ASSIGN(Client);
+ };
+
+ typedef std::map<int, Client*> ClientMap;
+
+ ClientMap clients_;
+ STLValueDeleter<ClientMap> clients_deleter_;
+
+ // This is the current number of clients we know about that have
+ // cards. When this number matches alsa_card_midi_count_, we know
+ // we are in sync between ALSA and udev. Until then, we cannot generate
+ // MIDIConnectionEvents to web clients.
+ int card_client_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AlsaSeqState);
+ };
+
+ class AlsaCard {
+ public:
+ AlsaCard(udev_device* dev,
+ const std::string& name,
+ const std::string& longname,
+ const std::string& driver,
+ int midi_device_count);
+ ~AlsaCard();
+ std::string name() const { return name_; }
+ std::string longname() const { return longname_; }
+ std::string driver() const { return driver_; }
+ std::string path() const { return path_; }
+ std::string bus() const { return bus_; }
+ std::string vendor_id() const { return vendor_id_; }
+ std::string model_id() const { return model_id_; }
+ std::string usb_interface_num() const { return usb_interface_num_; }
+ std::string serial() const { return serial_; }
+ int midi_device_count() const { return midi_device_count_; }
+ std::string manufacturer() const { return manufacturer_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(MidiManagerAlsaTest, ExtractManufacturer);
+
+ // Extracts the manufacturer using heuristics and a variety of sources.
+ static std::string ExtractManufacturerString(
+ const std::string& udev_id_vendor,
+ const std::string& udev_id_vendor_id,
+ const std::string& udev_id_vendor_from_database,
+ const std::string& name,
+ const std::string& longname);
+
+ const std::string name_;
+ const std::string longname_;
+ const std::string driver_;
+ const std::string path_;
+ const std::string bus_;
+ const std::string vendor_id_;
+ const std::string model_id_;
+ const std::string usb_interface_num_;
+ const std::string serial_;
+ const int midi_device_count_;
+ const std::string manufacturer_;
+
+ DISALLOW_COPY_AND_ASSIGN(AlsaCard);
+ };
+
+ typedef base::hash_map<int, uint32> SourceMap;
+ typedef base::hash_map<uint32, int> OutPortMap;
+
// An internal callback that runs on MidiSendThread.
- void SendMidiData(uint32 port_index,
- const std::vector<uint8>& data);
+ void SendMidiData(uint32 port_index, const std::vector<uint8>& data);
- void EventReset();
+ void ScheduleEventLoop();
void EventLoop();
+ void ProcessSingleEvent(snd_seq_event_t* event, double timestamp);
+ void ProcessClientStartEvent(int client_id);
+ void ProcessPortStartEvent(const snd_seq_addr_t& addr);
+ void ProcessClientExitEvent(const snd_seq_addr_t& addr);
+ void ProcessPortExitEvent(const snd_seq_addr_t& addr);
+ void ProcessUdevEvent(udev_device* dev);
+ void AddCard(udev_device* dev);
+ void RemoveCard(int number);
+
+ // Updates port_state_ and Web MIDI state from alsa_seq_state_.
+ void UpdatePortStateAndGenerateEvents();
+
+ // Enumerates ports. Call once after subscribing to the announce port.
+ void EnumerateAlsaPorts();
+ // Enumerates udev cards. Call once after initializing the udev monitor.
+ bool EnumerateUdevCards();
+ // Returns true if successful.
+ bool CreateAlsaOutputPort(uint32 port_index, int client_id, int port_id);
+ void DeleteAlsaOutputPort(uint32 port_index);
+ // Returns true if successful.
+ bool Subscribe(uint32 port_index, int client_id, int port_id);
+
+ AlsaSeqState alsa_seq_state_;
+ MidiPortState port_state_;
- // Alsa seq handles.
+ // ALSA seq handles.
snd_seq_t* in_client_;
+ int in_client_id_;
snd_seq_t* out_client_;
int out_client_id_;
// One input port, many output ports.
- int in_port_;
- std::vector<int> out_ports_;
+ int in_port_id_;
+ OutPortMap out_ports_; // guarded by out_ports_lock_
+ base::Lock out_ports_lock_; // guards out_ports_
- // Mapping from Alsa client:port to our index.
- typedef std::map<int, uint32> SourceMap;
+ // Mapping from ALSA client:port to our index.
SourceMap source_map_;
- // Alsa event <-> MIDI coders.
+ // Mapping from card to devices.
+ AlsaCardMap alsa_cards_;
+ STLValueDeleter<AlsaCardMap> alsa_cards_deleter_;
+
+ // This is the current count of midi devices across all cards we know
+ // about. When this number matches card_client_count_ in AlsaSeqState,
+ // we are safe to generate MIDIConnectionEvents. Otherwise we need to
+ // wait for our information from ALSA and udev to get back in sync.
+ int alsa_card_midi_count_;
+
+ // ALSA event -> MIDI coder.
snd_midi_event_t* decoder_;
- typedef std::vector<snd_midi_event_t*> EncoderList;
- EncoderList encoders_;
+
+ // udev, for querying hardware devices.
+ device::ScopedUdevPtr udev_;
+ device::ScopedUdevMonitorPtr udev_monitor_;
base::Thread send_thread_;
base::Thread event_thread_;
- bool event_thread_shutdown_; // guarded by shutdown_lock_
- base::Lock shutdown_lock_; // guards event_thread_shutdown_
+ bool event_thread_shutdown_; // guarded by shutdown_lock_
+ base::Lock shutdown_lock_; // guards event_thread_shutdown_
DISALLOW_COPY_AND_ASSIGN(MidiManagerAlsa);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MANAGER_ALSA_H_
diff --git a/chromium/media/midi/midi_manager_alsa_unittest.cc b/chromium/media/midi/midi_manager_alsa_unittest.cc
new file mode 100644
index 00000000000..ce783f53035
--- /dev/null
+++ b/chromium/media/midi/midi_manager_alsa_unittest.cc
@@ -0,0 +1,700 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_alsa.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace midi {
+
+class MidiManagerAlsaTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ // Pre-instantiate typical MidiPort instances that are often used in
+ // following tests.
+
+ // Inputs. port_input_0_ == port_input_1_.
+ port_input_0_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_1_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_minimal_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 0, 0, 0, "", "", "", "",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ // Outputs. port_output_0_ == port_output_1_.
+ port_output_0_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kOutput));
+ port_output_1_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kOutput));
+
+ // MidiPort fields that differ from port_input_0_ in a single way each time.
+ // Used for testing the Match* and Find* methods.
+ port_input_0_alt_path_.reset(new MidiManagerAlsa::MidiPort(
+ "path2", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_id_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial2"),
+ 1, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_client_name_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name2", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_port_name_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 5, "client_name", "port_name2", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_client_id_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 2, 2, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_port_id_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 3, 5, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_alt_midi_device_.reset(new MidiManagerAlsa::MidiPort(
+ "path", MidiManagerAlsa::MidiPort::Id("bus", "vendor", "model",
+ "interface", "serial"),
+ 1, 2, 6, "client_name", "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+
+ // "No card" variants of above. For testing FindDisconnected.
+ port_input_0_no_card_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 2, -1, "client_name",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_1_no_card_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 2, -1, "client_name",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_output_0_no_card_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 2, -1, "client_name",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kOutput));
+
+ // No card variants of the alt variants from above. For more testing
+ // of Match* and Find*.
+ port_input_0_no_card_alt_client_name_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 2, -1, "client_name2",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_no_card_alt_port_name_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 2, -1, "client_name",
+ "port_name2", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_no_card_alt_client_id_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 2, 2, -1, "client_name",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ port_input_0_no_card_alt_port_id_.reset(new MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 1, 3, -1, "client_name",
+ "port_name", "manufacturer", "version",
+ MidiManagerAlsa::MidiPort::Type::kInput));
+ }
+
+ // Counts ports for help with testing ToMidiPortState().
+ int CountPorts(MidiManagerAlsa::TemporaryMidiPortState& state) {
+ int count = 0;
+ for (auto it = state.begin(); it != state.end(); ++it)
+ ++count;
+ return count;
+ }
+
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_1_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_minimal_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_output_0_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_output_1_;
+
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_path_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_id_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_client_name_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_port_name_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_client_id_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_port_id_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_alt_midi_device_;
+
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_no_card_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_1_no_card_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_output_0_no_card_;
+
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_no_card_alt_client_name_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_no_card_alt_port_name_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_no_card_alt_client_id_;
+ scoped_ptr<MidiManagerAlsa::MidiPort> port_input_0_no_card_alt_port_id_;
+
+ // State fields to avoid declaring in test fixtures below.
+ MidiManagerAlsa::MidiPortState midi_port_state_0_;
+ MidiManagerAlsa::MidiPortState midi_port_state_1_;
+ MidiManagerAlsa::TemporaryMidiPortState temporary_midi_port_state_0_;
+ MidiManagerAlsa::AlsaSeqState alsa_seq_state_0_;
+ MidiManagerAlsa::AlsaCardMap alsa_cards_;
+};
+
+// Tests that ExtractManufacturerString works as expected.
+TEST_F(MidiManagerAlsaTest, ExtractManufacturer) {
+ EXPECT_EQ("My\\x20Vendor",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "My\\x20Vendor", "1234", "My Vendor, Inc.", "Card",
+ "My Vendor Inc Card at bus"));
+ EXPECT_EQ("My Vendor", MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "My Vendor", "1234", "My Vendor, Inc.", "Card",
+ "My Vendor Inc Card at bus"));
+ EXPECT_EQ("My Vendor, Inc.",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "1234", "1234", "My Vendor, Inc.", "Card",
+ "My Vendor Inc Card at bus"));
+ EXPECT_EQ("My Vendor Inc",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "1234", "1234", "", "Card", "My Vendor Inc Card at bus"));
+ EXPECT_EQ("My Vendor Inc",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "", "", "Card", "My Vendor Inc Card at bus"));
+ EXPECT_EQ("", MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "1234", "1234", "", "Card", "Longname"));
+ EXPECT_EQ("Keystation\\x20Mini\\x2032",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "Keystation\\x20Mini\\x2032", "129d",
+ "Evolution Electronics, Ltd", "Keystation Mini 32",
+ "Keystation Mini 32 Keystation Mini 32 at"
+ " usb-0000:00:14.0-2.4.4, full speed"));
+ EXPECT_EQ("Keystation Mini 32",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "Keystation Mini 32", "129d", "Evolution Electronics, Ltd",
+ "Keystation Mini 32",
+ "Keystation Mini 32 Keystation Mini 32 at"
+ " usb-0000:00:14.0-2.4.4, full speed"));
+ EXPECT_EQ("Keystation Mini 32",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "", "", "Keystation Mini 32",
+ "Keystation Mini 32 Keystation Mini 32 at"
+ " usb-0000:00:14.0-2.4.4, full speed"));
+ EXPECT_EQ("", MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "", "", "Serial MIDI (UART16550A)",
+ "Serial MIDI (UART16550A) [Soundcanvas] at 0x3f8, irq 4"));
+ EXPECT_EQ("", MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "", "", "VirMIDI", "Virtual MIDI Card 1"));
+ EXPECT_EQ("C-Media Electronics Inc",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "0x13f6", "C-Media Electronics Inc", "C-Media CMI8738 MIDI",
+ "C-Media CMI8738 (model 55) at 0xd000, irq 19"));
+ EXPECT_EQ("C-Media Electronics Inc",
+ MidiManagerAlsa::AlsaCard::ExtractManufacturerString(
+ "", "0x13f6", "C-Media Electronics Inc", "C-Media CMI8738 FM",
+ "C-Media CMI8738 (model 55) at 0xd000, irq 19"));
+}
+
+// Tests that verify proper serialization and generation of opaque key for
+// MidiPort.
+TEST_F(MidiManagerAlsaTest, JSONPortMetadata) {
+ EXPECT_EQ(
+ "{\"bus\":\"bus\",\"clientId\":1,\"clientName\":\"client_name\","
+ "\"midiDevice\":5,\"modelId\":\"model\",\"path\":\"path\",\"portId\":2,"
+ "\"portName\":\"port_name\",\"serial\":\"serial\",\"type\":\"input\","
+ "\"usbInterfaceNum\":\"interface\",\"vendorId\":\"vendor\"}",
+ port_input_0_->JSONValue());
+
+ EXPECT_EQ("810194DAF713B32FC9BE40EC822E21682635B48C242D09EA95DBA4A184A95877",
+ port_input_0_->OpaqueKey());
+
+ EXPECT_EQ(
+ "{\"bus\":\"bus\",\"clientId\":1,\"clientName\":\"client_name\","
+ "\"midiDevice\":5,\"modelId\":\"model\",\"path\":\"path\",\"portId\":2,"
+ "\"portName\":\"port_name\",\"serial\":\"serial\",\"type\":\"output\","
+ "\"usbInterfaceNum\":\"interface\",\"vendorId\":\"vendor\"}",
+ port_output_0_->JSONValue());
+ EXPECT_EQ("C32552FC772A0CA453A675CED05EFB3BDEF749EB58ED9522475206F111BC01E2",
+ port_output_0_->OpaqueKey());
+
+ EXPECT_EQ("{\"clientId\":0,\"midiDevice\":0,\"portId\":0,\"type\":\"input\"}",
+ port_input_minimal_->JSONValue());
+ EXPECT_EQ("3BC2A85598E5026D01DBCB022016C8A3362A9C7F912B88E303BF619C56D0C111",
+ port_input_minimal_->OpaqueKey());
+}
+
+// Tests Match* methods.
+TEST_F(MidiManagerAlsaTest, MatchConnected) {
+ // The query can be disconnected or connected, but the target
+ // must be connected.
+ port_input_1_->set_connected(false);
+ EXPECT_TRUE(port_input_0_->MatchConnected(*port_input_1_.get()));
+ EXPECT_FALSE(port_input_1_->MatchConnected(*port_input_0_.get()));
+
+ // Differing types.
+ EXPECT_FALSE(port_input_0_->MatchConnected(*port_output_0_.get()));
+
+ // Differing in 1 field. None should succeed.
+ EXPECT_FALSE(port_input_0_->MatchConnected(*port_input_0_alt_path_.get()));
+ EXPECT_FALSE(port_input_0_->MatchConnected(*port_input_0_alt_id_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchConnected(*port_input_0_alt_client_name_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchConnected(*port_input_0_alt_port_name_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchConnected(*port_input_0_alt_client_id_.get()));
+ EXPECT_FALSE(port_input_0_->MatchConnected(*port_input_0_alt_port_id_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchConnected(*port_input_0_alt_midi_device_.get()));
+}
+
+TEST_F(MidiManagerAlsaTest, MatchCard1) {
+ // The query can be disconnected or connected, but the target
+ // must be disonnected.
+ EXPECT_FALSE(port_input_0_->MatchCardPass1(*port_input_1_.get()));
+ port_input_0_->set_connected(false);
+ EXPECT_TRUE(port_input_0_->MatchCardPass1(*port_input_1_.get()));
+
+ // Differing types.
+ EXPECT_FALSE(port_input_0_->MatchCardPass1(*port_output_0_.get()));
+
+ // Tests matches differing in 1 field.
+ // client_name, port_name, client_id are ok to differ.
+ EXPECT_FALSE(port_input_0_->MatchCardPass1(*port_input_0_alt_path_.get()));
+ EXPECT_FALSE(port_input_0_->MatchCardPass1(*port_input_0_alt_id_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass1(*port_input_0_alt_client_name_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass1(*port_input_0_alt_port_name_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass1(*port_input_0_alt_client_id_.get()));
+ EXPECT_FALSE(port_input_0_->MatchCardPass1(*port_input_0_alt_port_id_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchCardPass1(*port_input_0_alt_midi_device_.get()));
+}
+
+TEST_F(MidiManagerAlsaTest, MatchCard2) {
+ // The query can be disconnected or connected, but the target
+ // must be disonnected.
+ EXPECT_FALSE(port_input_0_->MatchCardPass2(*port_input_1_.get()));
+ port_input_0_->set_connected(false);
+ EXPECT_TRUE(port_input_0_->MatchCardPass2(*port_input_1_.get()));
+
+ // Differing types.
+ EXPECT_FALSE(port_input_0_->MatchCardPass2(*port_output_0_.get()));
+
+ // Tests matches differing in 1 field.
+ // client_name, port_name, path, client_id are ok to differ.
+ EXPECT_TRUE(port_input_0_->MatchCardPass2(*port_input_0_alt_path_.get()));
+ EXPECT_FALSE(port_input_0_->MatchCardPass2(*port_input_0_alt_id_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass2(*port_input_0_alt_client_name_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass2(*port_input_0_alt_port_name_.get()));
+ EXPECT_TRUE(
+ port_input_0_->MatchCardPass2(*port_input_0_alt_client_id_.get()));
+ EXPECT_FALSE(port_input_0_->MatchCardPass2(*port_input_0_alt_port_id_.get()));
+ EXPECT_FALSE(
+ port_input_0_->MatchCardPass2(*port_input_0_alt_midi_device_.get()));
+}
+
+TEST_F(MidiManagerAlsaTest, MatchNoCard1) {
+ // The query can be disconnected or connected, but the target
+ // must be disonnected.
+ // path and id must be empty. midi_device must be -1.
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(*port_input_1_.get()));
+ port_input_0_no_card_->set_connected(false);
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(*port_input_1_.get()));
+ EXPECT_TRUE(
+ port_input_0_no_card_->MatchNoCardPass1(*port_input_1_no_card_.get()));
+
+ // Differing types.
+ EXPECT_FALSE(
+ port_input_0_no_card_->MatchNoCardPass1(*port_output_0_no_card_.get()));
+
+ // Tests matches differing in 1 field.
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(
+ *port_input_0_no_card_alt_client_name_.get()));
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(
+ *port_input_0_no_card_alt_port_name_.get()));
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(
+ *port_input_0_no_card_alt_client_id_.get()));
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass1(
+ *port_input_0_no_card_alt_port_id_.get()));
+}
+
+TEST_F(MidiManagerAlsaTest, MatchNoCard2) {
+ // The query can be disconnected or connected, but the target
+ // must be disonnected.
+ // path and id must be empty. midi_device must be -1.
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass2(*port_input_1_.get()));
+ port_input_0_no_card_->set_connected(false);
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass2(*port_input_1_.get()));
+ EXPECT_TRUE(
+ port_input_0_no_card_->MatchNoCardPass2(*port_input_1_no_card_.get()));
+
+ // Differing types.
+ EXPECT_FALSE(
+ port_input_0_no_card_->MatchNoCardPass2(*port_output_0_no_card_.get()));
+
+ // Tests matches differing in 1 field.
+ // client_id ok to differ.
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass2(
+ *port_input_0_no_card_alt_client_name_.get()));
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass2(
+ *port_input_0_no_card_alt_port_name_.get()));
+ EXPECT_TRUE(port_input_0_no_card_->MatchNoCardPass2(
+ *port_input_0_no_card_alt_client_id_.get()));
+ EXPECT_FALSE(port_input_0_no_card_->MatchNoCardPass2(
+ *port_input_0_no_card_alt_port_id_.get()));
+}
+
+// Tests that MidiPorts start connected.
+TEST_F(MidiManagerAlsaTest, PortStartsConnected) {
+ EXPECT_TRUE(port_output_0_->connected());
+ EXPECT_TRUE(port_input_0_->connected());
+}
+
+// Tests that the web_port_index gets updated by MidiPortState.
+TEST_F(MidiManagerAlsaTest, PortIndexSet) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_output_0_tracking_pointer = port_output_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+ auto* port_output_1_tracking_pointer = port_input_1_.get();
+
+ // Explicitly initialize web_port_index.
+ port_input_1_->set_web_port_index(5000);
+ port_output_1_->set_web_port_index(5000);
+
+ midi_port_state_0_.Insert(port_input_0_.Pass());
+ midi_port_state_0_.Insert(port_output_0_.Pass());
+ midi_port_state_0_.Insert(port_input_1_.Pass());
+ midi_port_state_0_.Insert(port_output_1_.Pass());
+
+ // First port of each type has index of 0.
+ EXPECT_EQ(0U, port_input_0_tracking_pointer->web_port_index());
+ EXPECT_EQ(0U, port_output_0_tracking_pointer->web_port_index());
+ // Second port of each type has index of 1.
+ EXPECT_EQ(1U, port_input_1_tracking_pointer->web_port_index());
+ EXPECT_EQ(1U, port_output_1_tracking_pointer->web_port_index());
+}
+
+// Tests that the web_port_index is not updated by TemporaryMidiPortState.
+TEST_F(MidiManagerAlsaTest, PortIndexNotSet) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_output_0_tracking_pointer = port_output_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+ auto* port_output_1_tracking_pointer = port_input_1_.get();
+
+ // Explicitly initialize web_port_index.
+ port_input_1_->set_web_port_index(5000);
+ port_output_1_->set_web_port_index(5000);
+
+ temporary_midi_port_state_0_.Insert(port_input_0_.Pass());
+ temporary_midi_port_state_0_.Insert(port_output_0_.Pass());
+ temporary_midi_port_state_0_.Insert(port_input_1_.Pass());
+ temporary_midi_port_state_0_.Insert(port_output_1_.Pass());
+
+ // web_port_index is untouched.
+ EXPECT_EQ(0U, port_input_0_tracking_pointer->web_port_index());
+ EXPECT_EQ(0U, port_output_0_tracking_pointer->web_port_index());
+ EXPECT_EQ(5000U, port_input_1_tracking_pointer->web_port_index());
+ EXPECT_EQ(5000U, port_output_1_tracking_pointer->web_port_index());
+}
+
+// Tests that inputs and outputs stay separate in MidiPortState.
+TEST_F(MidiManagerAlsaTest, SeparateInputOutput) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_output_0_tracking_pointer = port_output_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+ auto* port_output_1_tracking_pointer = port_input_1_.get();
+
+ // First port of each type has index of 0.
+ EXPECT_EQ(0U, midi_port_state_0_.Insert(port_input_0_.Pass()));
+ EXPECT_EQ(0U, midi_port_state_0_.Insert(port_output_0_.Pass()));
+
+ // Second port of each type has index of 1.
+ EXPECT_EQ(1U, midi_port_state_0_.Insert(port_input_1_.Pass()));
+ EXPECT_EQ(1U, midi_port_state_0_.Insert(port_output_1_.Pass()));
+
+ // Check again that the field matches what was returned.
+ EXPECT_EQ(0U, port_input_0_tracking_pointer->web_port_index());
+ EXPECT_EQ(0U, port_output_0_tracking_pointer->web_port_index());
+ EXPECT_EQ(1U, port_input_1_tracking_pointer->web_port_index());
+ EXPECT_EQ(1U, port_output_1_tracking_pointer->web_port_index());
+}
+
+// Tests FindConnected.
+TEST_F(MidiManagerAlsaTest, FindConnected) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+
+ // Insert port_input_0.
+ midi_port_state_0_.Insert(port_input_0_.Pass());
+ // Look for port_input_1 (every field matches port_input_0).
+ auto it = midi_port_state_0_.FindConnected(*port_input_1_tracking_pointer);
+ EXPECT_EQ(port_input_0_tracking_pointer, *it);
+ // Look for something else that we won't find.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindConnected(*port_input_0_alt_path_));
+}
+
+TEST_F(MidiManagerAlsaTest, FindConnected2) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+
+ // Insert some stuff.
+ midi_port_state_0_.Insert(port_input_0_alt_path_.Pass());
+ midi_port_state_0_.Insert(port_input_0_alt_id_.Pass());
+ midi_port_state_0_.Insert(port_input_0_alt_client_name_.Pass());
+ // Insert port_input_0.
+ midi_port_state_0_.Insert(port_input_0_.Pass());
+ // Insert some more stuff.
+ midi_port_state_0_.Insert(port_input_0_alt_port_id_.Pass());
+ // Look for port_input_1 (matches to port_input_0).
+ auto it = midi_port_state_0_.FindConnected(*port_input_1_tracking_pointer);
+ EXPECT_EQ(port_input_0_tracking_pointer, *it);
+ // Look for something else that we won't find.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindConnected(*port_input_minimal_));
+}
+
+TEST_F(MidiManagerAlsaTest, FindConnected3) {
+ // midi_port_state_0_ is empty to start.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindConnected(*port_input_minimal_));
+}
+
+// Tests FindDisconnected.
+TEST_F(MidiManagerAlsaTest, FindDisconnected) {
+ // midi_port_state_0_ is empty to start.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindDisconnected(*port_input_minimal_));
+}
+
+TEST_F(MidiManagerAlsaTest, FindDisconnected2) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_input_1_tracking_pointer = port_input_1_.get();
+ auto* port_input_1_no_card_tracking_pointer = port_input_1_no_card_.get();
+
+ // Ports need to be disconnected to find them.
+ port_input_0_alt_id_->set_connected(false);
+ port_input_0_alt_path_->set_connected(false);
+ port_input_0_->set_connected(false);
+
+ // Insert some stuff.
+ midi_port_state_0_.Insert(port_input_0_alt_id_.Pass());
+ midi_port_state_0_.Insert(port_input_0_alt_path_.Pass());
+ // Insert port_input_0.
+ midi_port_state_0_.Insert(port_input_0_.Pass());
+
+ // Add "no card" stuff.
+ port_input_1_no_card_->set_connected(false);
+ midi_port_state_0_.Insert(port_input_1_no_card_.Pass());
+
+ // Insert some more stuff.
+ midi_port_state_0_.Insert(port_input_0_alt_port_id_.Pass());
+
+ // Look for port_input_1, should trigger exact match.
+ EXPECT_EQ(port_input_0_tracking_pointer, *midi_port_state_0_.FindDisconnected(
+ *port_input_1_tracking_pointer));
+
+ // Look for no card exact match.
+ EXPECT_EQ(port_input_1_no_card_tracking_pointer,
+ *midi_port_state_0_.FindDisconnected(*port_input_0_no_card_.get()));
+
+ // Look for something else that we won't find.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindDisconnected(*port_input_minimal_));
+}
+
+TEST_F(MidiManagerAlsaTest, FindDisconnected3) {
+ auto* port_input_0_tracking_pointer = port_input_0_.get();
+ auto* port_input_0_alt_path_tracking_pointer = port_input_0_alt_path_.get();
+ auto* port_input_1_no_card_tracking_pointer = port_input_1_no_card_.get();
+
+ // Ports need to be disconnected to find them.
+ port_input_0_alt_path_->set_connected(false);
+ port_input_0_->set_connected(false);
+
+ // Insert some stuff.
+ midi_port_state_0_.Insert(port_input_0_alt_path_.Pass());
+ midi_port_state_0_.Insert(port_input_0_alt_id_.Pass());
+
+ // Add no card stuff.
+ port_input_1_no_card_->set_connected(false);
+ midi_port_state_0_.Insert(port_input_1_no_card_.Pass());
+
+ // Look for port_input_0, should find port_input_0_alt_path.
+ EXPECT_EQ(
+ port_input_0_alt_path_tracking_pointer,
+ *midi_port_state_0_.FindDisconnected(*port_input_0_tracking_pointer));
+
+ // Look for no card partial match.
+ EXPECT_EQ(port_input_1_no_card_tracking_pointer,
+ *midi_port_state_0_.FindDisconnected(
+ *port_input_0_no_card_alt_client_id_.get()));
+
+ // Won't find this.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindDisconnected(
+ *port_input_0_no_card_alt_port_id_.get()));
+
+ // Look for something else that we won't find.
+ EXPECT_EQ(midi_port_state_0_.end(),
+ midi_port_state_0_.FindDisconnected(*port_input_minimal_));
+}
+
+// Tests AlsaSeqState -> MidiPortState.
+TEST_F(MidiManagerAlsaTest, ToMidiPortState) {
+ // Empty state.
+ EXPECT_EQ(0,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Still empty, because there are no ports yet.
+ alsa_seq_state_0_.ClientStart(0, "0", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(0,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add a port, now it has 1 item when converted.
+ alsa_seq_state_0_.PortStart(
+ 0, 0, "0:0", MidiManagerAlsa::AlsaSeqState::PortDirection::kInput, true);
+ EXPECT_EQ(1,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Close client. This closes its ports and returns count to 0.
+ alsa_seq_state_0_.ClientExit(0);
+ EXPECT_EQ(0,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add another port, without client. This does nothing.
+ alsa_seq_state_0_.PortStart(
+ 0, 0, "0:0", MidiManagerAlsa::AlsaSeqState::PortDirection::kInput, true);
+ EXPECT_EQ(0,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Close client again. This does nothing.
+ alsa_seq_state_0_.ClientExit(0);
+ EXPECT_EQ(0,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add duplex port. This will add 2 ports when converted.
+ alsa_seq_state_0_.ClientStart(0, "0", SND_SEQ_KERNEL_CLIENT);
+ alsa_seq_state_0_.PortStart(
+ 0, 0, "0:0", MidiManagerAlsa::AlsaSeqState::PortDirection::kDuplex, true);
+ EXPECT_EQ(2,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add an output port. Now we are at 3.
+ alsa_seq_state_0_.PortStart(
+ 0, 1, "0:1", MidiManagerAlsa::AlsaSeqState::PortDirection::kOutput, true);
+ EXPECT_EQ(3,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add another client. Still at 3.
+ alsa_seq_state_0_.ClientStart(1, "1", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(3,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add a port. Now at 4.
+ alsa_seq_state_0_.PortStart(
+ 1, 0, "1:0", MidiManagerAlsa::AlsaSeqState::PortDirection::kInput, true);
+ EXPECT_EQ(4,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add a duplicate port. Still at 4.
+ alsa_seq_state_0_.PortStart(
+ 1, 0, "1:0", MidiManagerAlsa::AlsaSeqState::PortDirection::kInput, true);
+ EXPECT_EQ(4,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Add a duplicate client. This will close the ports from the previous client.
+ alsa_seq_state_0_.ClientStart(1, "1", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(3,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Remove a duplex port. This reduces count by 2.
+ alsa_seq_state_0_.PortExit(0, 0);
+ EXPECT_EQ(1,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Remove a non-existent port. No change.
+ alsa_seq_state_0_.PortExit(0, 0);
+ EXPECT_EQ(1,
+ CountPorts(*alsa_seq_state_0_.ToMidiPortState(alsa_cards_).get()));
+
+ // Verify the last entry.
+ EXPECT_TRUE((*alsa_seq_state_0_.ToMidiPortState(alsa_cards_)->begin())
+ ->MatchConnected(MidiManagerAlsa::MidiPort(
+ "", MidiManagerAlsa::MidiPort::Id(), 0, 1, -1, "0", "0:1",
+ "", "", MidiManagerAlsa::MidiPort::Type::kOutput)));
+}
+
+// Tests card_client_count of AlsaSeqState.
+TEST_F(MidiManagerAlsaTest, CardClientCount) {
+ EXPECT_EQ(0, alsa_seq_state_0_.card_client_count());
+
+ // Add a kernel client.
+ alsa_seq_state_0_.ClientStart(16, "16", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Add a duplicate kernel client.
+ alsa_seq_state_0_.ClientStart(16, "16", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Add a duplicate user client.
+ alsa_seq_state_0_.ClientStart(16, "16", SND_SEQ_USER_CLIENT);
+ EXPECT_EQ(0, alsa_seq_state_0_.card_client_count());
+
+ // Add 2 more kernel clients.
+ alsa_seq_state_0_.ClientStart(17, "17", SND_SEQ_KERNEL_CLIENT);
+ alsa_seq_state_0_.ClientStart(18, "18", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(2, alsa_seq_state_0_.card_client_count());
+
+ // Add another user client.
+ alsa_seq_state_0_.ClientStart(101, "101", SND_SEQ_USER_CLIENT);
+ EXPECT_EQ(2, alsa_seq_state_0_.card_client_count());
+
+ // Remove kernel client.
+ alsa_seq_state_0_.ClientExit(17);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Remove user client.
+ alsa_seq_state_0_.ClientExit(16);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Remove kernel client.
+ alsa_seq_state_0_.ClientExit(18);
+ EXPECT_EQ(0, alsa_seq_state_0_.card_client_count());
+
+ // Add a duplicate kernel client.
+ alsa_seq_state_0_.ClientStart(101, "101", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Add a low kernel client.
+ alsa_seq_state_0_.ClientStart(1, "1", SND_SEQ_KERNEL_CLIENT);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+
+ // Remove low kernel client.
+ alsa_seq_state_0_.ClientExit(1);
+ EXPECT_EQ(1, alsa_seq_state_0_.card_client_count());
+}
+
+TEST_F(MidiManagerAlsaTest, AlsaCards) {
+ // TODO(agoode): test add/remove of alsa cards.
+}
+
+// TODO(agoode): Test old -> new state event generation, using mocks.
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_manager_android.cc b/chromium/media/midi/midi_manager_android.cc
index b8385fb0f48..ce3db4aa385 100644
--- a/chromium/media/midi/midi_manager_android.cc
+++ b/chromium/media/midi/midi_manager_android.cc
@@ -7,10 +7,12 @@
#include "media/midi/usb_midi_device_factory_android.h"
namespace media {
+namespace midi {
MidiManager* MidiManager::Create() {
return new MidiManagerUsb(
scoped_ptr<UsbMidiDevice::Factory>(new UsbMidiDeviceFactoryAndroid));
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_mac.cc b/chromium/media/midi/midi_manager_mac.cc
index e9d56fb13ad..1b4c696e5b9 100644
--- a/chromium/media/midi/midi_manager_mac.cc
+++ b/chromium/media/midi/midi_manager_mac.cc
@@ -4,7 +4,7 @@
#include "media/midi/midi_manager_mac.h"
-#include <string>
+#include <algorithm>
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
@@ -22,15 +22,16 @@ using std::string;
// (if possible).
namespace media {
+namespace midi {
namespace {
-MidiPortInfo GetPortInfoFromEndpoint(
- MIDIEndpointRef endpoint) {
- SInt32 id_number = 0;
- MIDIObjectGetIntegerProperty(endpoint, kMIDIPropertyUniqueID, &id_number);
- string id = IntToString(id_number);
+// Maximum buffer size that CoreMIDI can handle for MIDIPacketList.
+const size_t kCoreMIDIMaxPacketListSize = 65536;
+// Pessimistic estimation on available data size of MIDIPacketList.
+const size_t kEstimatedMaxPacketDataSize = kCoreMIDIMaxPacketListSize / 2;
+MidiPortInfo GetPortInfoFromEndpoint(MIDIEndpointRef endpoint) {
string manufacturer;
CFStringRef manufacturer_ref = NULL;
OSStatus result = MIDIObjectGetStringProperty(
@@ -46,11 +47,14 @@ MidiPortInfo GetPortInfoFromEndpoint(
string name;
CFStringRef name_ref = NULL;
- result = MIDIObjectGetStringProperty(endpoint, kMIDIPropertyName, &name_ref);
- if (result == noErr)
+ result = MIDIObjectGetStringProperty(endpoint, kMIDIPropertyDisplayName,
+ &name_ref);
+ if (result == noErr) {
name = SysCFStringRefToUTF8(name_ref);
- else
- DLOG(WARNING) << "Failed to get kMIDIPropertyName with status " << result;
+ } else {
+ DLOG(WARNING) << "Failed to get kMIDIPropertyDisplayName with status "
+ << result;
+ }
string version;
SInt32 version_number = 0;
@@ -65,7 +69,24 @@ MidiPortInfo GetPortInfoFromEndpoint(
<< result;
}
- return MidiPortInfo(id, manufacturer, name, version);
+ string id;
+ SInt32 id_number = 0;
+ result = MIDIObjectGetIntegerProperty(
+ endpoint, kMIDIPropertyUniqueID, &id_number);
+ if (result == noErr) {
+ id = IntToString(id_number);
+ } else {
+ // On connecting some devices, e.g., nano KONTROL2, unknown endpoints
+ // appear and disappear quickly and they fail on queries.
+ // Let's ignore such ghost devices.
+ // Same problems will happen if the device is disconnected before finishing
+ // all queries.
+ DLOG(WARNING) << "Failed to get kMIDIPropertyUniqueID with status "
+ << result;
+ }
+
+ const MidiPortState state = MIDI_PORT_OPENED;
+ return MidiPortInfo(id, manufacturer, name, version, state);
}
double MIDITimeStampToSeconds(MIDITimeStamp timestamp) {
@@ -88,8 +109,6 @@ MidiManagerMac::MidiManagerMac()
: midi_client_(0),
coremidi_input_(0),
coremidi_output_(0),
- packet_list_(NULL),
- midi_packet_(NULL),
client_thread_("MidiClientThread"),
shutdown_(false) {
}
@@ -137,38 +156,43 @@ void MidiManagerMac::InitializeCoreMIDI() {
DCHECK(client_thread_.message_loop_proxy()->BelongsToCurrentThread());
// CoreMIDI registration.
- // TODO(toyoshim): Set MIDINotifyProc to receive CoreMIDI event notifications.
- midi_client_ = 0;
+ DCHECK_EQ(0u, midi_client_);
OSStatus result =
- MIDIClientCreate(CFSTR("Chrome"), NULL, NULL, &midi_client_);
-
- if (result != noErr)
+ MIDIClientCreate(CFSTR("Chrome"), ReceiveMidiNotifyDispatch, this,
+ &midi_client_);
+ if (result != noErr || midi_client_ == 0)
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
- coremidi_input_ = 0;
-
// Create input and output port.
+ DCHECK_EQ(0u, coremidi_input_);
result = MIDIInputPortCreate(
midi_client_,
CFSTR("MIDI Input"),
ReadMidiDispatch,
this,
&coremidi_input_);
- if (result != noErr)
+ if (result != noErr || coremidi_input_ == 0)
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ DCHECK_EQ(0u, coremidi_output_);
result = MIDIOutputPortCreate(
midi_client_,
CFSTR("MIDI Output"),
&coremidi_output_);
- if (result != noErr)
+ if (result != noErr || coremidi_output_ == 0)
return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ // Following loop may miss some newly attached devices, but such device will
+ // be captured by ReceiveMidiNotifyDispatch callback.
uint32 destination_count = MIDIGetNumberOfDestinations();
destinations_.resize(destination_count);
-
for (uint32 i = 0; i < destination_count ; i++) {
MIDIEndpointRef destination = MIDIGetDestination(i);
+ if (destination == 0) {
+ // One ore more devices may be detached.
+ destinations_.resize(i);
+ break;
+ }
// Keep track of all destinations (known as outputs by the Web MIDI API).
// Cache to avoid any possible overhead in calling MIDIGetDestination().
@@ -178,28 +202,102 @@ void MidiManagerMac::InitializeCoreMIDI() {
AddOutputPort(info);
}
- // Open connections from all sources.
+ // Open connections from all sources. This loop also may miss new devices.
uint32 source_count = MIDIGetNumberOfSources();
-
for (uint32 i = 0; i < source_count; ++i) {
// Receive from all sources.
- MIDIEndpointRef src = MIDIGetSource(i);
- MIDIPortConnectSource(coremidi_input_, src, reinterpret_cast<void*>(src));
+ MIDIEndpointRef source = MIDIGetSource(i);
+ if (source == 0)
+ break;
+
+ // Start listening.
+ MIDIPortConnectSource(
+ coremidi_input_, source, reinterpret_cast<void*>(source));
// Keep track of all sources (known as inputs in Web MIDI API terminology).
- source_map_[src] = i;
+ source_map_[source] = i;
- MidiPortInfo info = GetPortInfoFromEndpoint(src);
+ MidiPortInfo info = GetPortInfoFromEndpoint(source);
AddInputPort(info);
}
- packet_list_ = reinterpret_cast<MIDIPacketList*>(midi_buffer_);
- midi_packet_ = MIDIPacketListInit(packet_list_);
+ // Allocate maximum size of buffer that CoreMIDI can handle.
+ midi_buffer_.resize(kCoreMIDIMaxPacketListSize);
CompleteInitialization(MIDI_OK);
}
// static
+void MidiManagerMac::ReceiveMidiNotifyDispatch(const MIDINotification* message,
+ void* refcon) {
+ // This callback function is invoked on |client_thread_|.
+ MidiManagerMac* manager = static_cast<MidiManagerMac*>(refcon);
+ manager->ReceiveMidiNotify(message);
+}
+
+void MidiManagerMac::ReceiveMidiNotify(const MIDINotification* message) {
+ DCHECK(client_thread_.message_loop_proxy()->BelongsToCurrentThread());
+
+ if (kMIDIMsgObjectAdded == message->messageID) {
+ // New device is going to be attached.
+ const MIDIObjectAddRemoveNotification* notification =
+ reinterpret_cast<const MIDIObjectAddRemoveNotification*>(message);
+ MIDIEndpointRef endpoint =
+ static_cast<MIDIEndpointRef>(notification->child);
+ if (notification->childType == kMIDIObjectType_Source) {
+ // Attaching device is an input device.
+ auto it = source_map_.find(endpoint);
+ if (it == source_map_.end()) {
+ MidiPortInfo info = GetPortInfoFromEndpoint(endpoint);
+ // If the device disappears before finishing queries, MidiPortInfo
+ // becomes incomplete. Skip and do not cache such information here.
+ // On kMIDIMsgObjectRemoved, the entry will be ignored because it
+ // will not be found in the pool.
+ if (!info.id.empty()) {
+ uint32 index = source_map_.size();
+ source_map_[endpoint] = index;
+ AddInputPort(info);
+ MIDIPortConnectSource(
+ coremidi_input_, endpoint, reinterpret_cast<void*>(endpoint));
+ }
+ } else {
+ SetInputPortState(it->second, MIDI_PORT_OPENED);
+ }
+ } else if (notification->childType == kMIDIObjectType_Destination) {
+ // Attaching device is an output device.
+ auto it = std::find(destinations_.begin(), destinations_.end(), endpoint);
+ if (it == destinations_.end()) {
+ MidiPortInfo info = GetPortInfoFromEndpoint(endpoint);
+ // Skip cases that queries are not finished correctly.
+ if (!info.id.empty()) {
+ destinations_.push_back(endpoint);
+ AddOutputPort(info);
+ }
+ } else {
+ SetOutputPortState(it - destinations_.begin(), MIDI_PORT_OPENED);
+ }
+ }
+ } else if (kMIDIMsgObjectRemoved == message->messageID) {
+ // Existing device is going to be detached.
+ const MIDIObjectAddRemoveNotification* notification =
+ reinterpret_cast<const MIDIObjectAddRemoveNotification*>(message);
+ MIDIEndpointRef endpoint =
+ static_cast<MIDIEndpointRef>(notification->child);
+ if (notification->childType == kMIDIObjectType_Source) {
+ // Detaching device is an input device.
+ auto it = source_map_.find(endpoint);
+ if (it != source_map_.end())
+ SetInputPortState(it->second, MIDI_PORT_DISCONNECTED);
+ } else if (notification->childType == kMIDIObjectType_Destination) {
+ // Detaching device is an output device.
+ auto it = std::find(destinations_.begin(), destinations_.end(), endpoint);
+ if (it != destinations_.end())
+ SetOutputPortState(it - destinations_.begin(), MIDI_PORT_DISCONNECTED);
+ }
+ }
+}
+
+// static
void MidiManagerMac::ReadMidiDispatch(const MIDIPacketList* packet_list,
void* read_proc_refcon,
void* src_conn_refcon) {
@@ -222,24 +320,26 @@ void MidiManagerMac::ReadMidi(MIDIEndpointRef source,
// high-priority thread owned by CoreMIDI.
// Lookup the port index based on the source.
- SourceMap::iterator j = source_map_.find(source);
- if (j == source_map_.end())
+ auto it = source_map_.find(source);
+ if (it == source_map_.end())
return;
// This is safe since MidiManagerMac does not remove any existing
// MIDIEndpointRef, and the order is saved.
- uint32 port_index = source_map_[source];
+ uint32 port_index = it->second;
// Go through each packet and process separately.
+ const MIDIPacket* packet = &packet_list->packet[0];
for (size_t i = 0; i < packet_list->numPackets; i++) {
// Each packet contains MIDI data for one or more messages (like note-on).
- const MIDIPacket &packet = packet_list->packet[i];
- double timestamp_seconds = MIDITimeStampToSeconds(packet.timeStamp);
+ double timestamp_seconds = MIDITimeStampToSeconds(packet->timeStamp);
ReceiveMidiData(
port_index,
- packet.data,
- packet.length,
+ packet->data,
+ packet->length,
timestamp_seconds);
+
+ packet = MIDIPacketNext(packet);
}
}
@@ -249,29 +349,38 @@ void MidiManagerMac::SendMidiData(MidiManagerClient* client,
double timestamp) {
DCHECK(client_thread_.message_loop_proxy()->BelongsToCurrentThread());
- // System Exclusive has already been filtered.
- MIDITimeStamp coremidi_timestamp = SecondsToMIDITimeStamp(timestamp);
-
- midi_packet_ = MIDIPacketListAdd(
- packet_list_,
- kMaxPacketListSize,
- midi_packet_,
- coremidi_timestamp,
- data.size(),
- &data[0]);
-
// Lookup the destination based on the port index.
if (static_cast<size_t>(port_index) >= destinations_.size())
return;
+ MIDITimeStamp coremidi_timestamp = SecondsToMIDITimeStamp(timestamp);
MIDIEndpointRef destination = destinations_[port_index];
- MIDISend(coremidi_output_, destination, packet_list_);
-
- // Re-initialize for next time.
- midi_packet_ = MIDIPacketListInit(packet_list_);
+ size_t send_size;
+ for (size_t sent_size = 0; sent_size < data.size(); sent_size += send_size) {
+ MIDIPacketList* packet_list =
+ reinterpret_cast<MIDIPacketList*>(midi_buffer_.data());
+ MIDIPacket* midi_packet = MIDIPacketListInit(packet_list);
+ // Limit the maximum payload size to kEstimatedMaxPacketDataSize that is
+ // half of midi_buffer data size. MIDIPacketList and MIDIPacket consume
+ // extra buffer areas for meta information, and available size is smaller
+ // than buffer size. Here, we simply assume that at least half size is
+ // available for data payload.
+ send_size = std::min(data.size() - sent_size, kEstimatedMaxPacketDataSize);
+ midi_packet = MIDIPacketListAdd(
+ packet_list,
+ kCoreMIDIMaxPacketListSize,
+ midi_packet,
+ coremidi_timestamp,
+ send_size,
+ &data[sent_size]);
+ DCHECK(midi_packet);
+
+ MIDISend(coremidi_output_, destination, packet_list);
+ }
client->AccumulateMidiBytesSent(data.size());
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_mac.h b/chromium/media/midi/midi_manager_mac.h
index f7179ec18a7..628bfb69504 100644
--- a/chromium/media/midi/midi_manager_mac.h
+++ b/chromium/media/midi/midi_manager_mac.h
@@ -14,12 +14,14 @@
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/threading/thread.h"
+#include "media/midi/midi_export.h"
#include "media/midi/midi_manager.h"
#include "media/midi/midi_port_info.h"
namespace media {
+namespace midi {
-class MEDIA_EXPORT MidiManagerMac : public MidiManager {
+class MIDI_EXPORT MidiManagerMac final : public MidiManager {
public:
MidiManagerMac();
~MidiManagerMac() override;
@@ -42,13 +44,18 @@ class MEDIA_EXPORT MidiManagerMac : public MidiManager {
// StartInitialization().
void InitializeCoreMIDI();
+ // CoreMIDI callback for MIDI notification.
+ // Receives MIDI related event notifications from CoreMIDI.
+ static void ReceiveMidiNotifyDispatch(const MIDINotification* message,
+ void* refcon);
+ void ReceiveMidiNotify(const MIDINotification* message);
+
// CoreMIDI callback for MIDI data.
// Each callback can contain multiple packets, each of which can contain
// multiple MIDI messages.
- static void ReadMidiDispatch(
- const MIDIPacketList *pktlist,
- void *read_proc_refcon,
- void *src_conn_refcon);
+ static void ReadMidiDispatch(const MIDIPacketList* packet_list,
+ void* read_proc_refcon,
+ void* src_conn_refcon);
virtual void ReadMidi(MIDIEndpointRef source, const MIDIPacketList *pktlist);
// An internal callback that runs on MidiSendThread.
@@ -61,19 +68,15 @@ class MEDIA_EXPORT MidiManagerMac : public MidiManager {
MIDIClientRef midi_client_;
MIDIPortRef coremidi_input_;
MIDIPortRef coremidi_output_;
-
- enum{ kMaxPacketListSize = 512 };
- char midi_buffer_[kMaxPacketListSize];
- MIDIPacketList* packet_list_;
- MIDIPacket* midi_packet_;
-
- typedef std::map<MIDIEndpointRef, uint32> SourceMap;
+ std::vector<uint8> midi_buffer_;
// Keeps track of the index (0-based) for each of our sources.
+ typedef std::map<MIDIEndpointRef, uint32> SourceMap;
SourceMap source_map_;
// Keeps track of all destinations.
- std::vector<MIDIEndpointRef> destinations_;
+ typedef std::vector<MIDIEndpointRef> DestinationVector;
+ DestinationVector destinations_;
// |client_thread_| is used to handle platform dependent operations.
base::Thread client_thread_;
@@ -84,6 +87,7 @@ class MEDIA_EXPORT MidiManagerMac : public MidiManager {
DISALLOW_COPY_AND_ASSIGN(MidiManagerMac);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MANAGER_MAC_H_
diff --git a/chromium/media/midi/midi_manager_mac_unittest.cc b/chromium/media/midi/midi_manager_mac_unittest.cc
new file mode 100644
index 00000000000..8bd75f2d650
--- /dev/null
+++ b/chromium/media/midi/midi_manager_mac_unittest.cc
@@ -0,0 +1,162 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_mac.h"
+
+#include <CoreMIDI/MIDIServices.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/lock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace midi {
+
+namespace {
+
+void Noop(const MIDIPacketList*, void*, void*) {}
+
+class FakeMidiManagerClient : public MidiManagerClient {
+ public:
+ FakeMidiManagerClient()
+ : result_(MIDI_NOT_SUPPORTED),
+ wait_for_result_(true),
+ wait_for_port_(true),
+ unexpected_callback_(false) {}
+
+ // MidiManagerClient implementation.
+ void AddInputPort(const MidiPortInfo& info) override {}
+ void AddOutputPort(const MidiPortInfo& info) override {
+ base::AutoLock lock(lock_);
+ // AddOutputPort may be called before CompleteStartSession() is invoked
+ // if one or more MIDI devices including virtual ports are connected.
+ // Just ignore in such cases.
+ if (wait_for_result_)
+ return;
+
+ // Check if this is the first call after CompleteStartSession(), and
+ // the case should not happen twice.
+ if (!wait_for_port_)
+ unexpected_callback_ = true;
+
+ info_ = info;
+ wait_for_port_ = false;
+ }
+ void SetInputPortState(uint32 port_index, MidiPortState state) override {}
+ void SetOutputPortState(uint32 port_index, MidiPortState state) override {}
+
+ void CompleteStartSession(MidiResult result) override {
+ base::AutoLock lock(lock_);
+ if (!wait_for_result_)
+ unexpected_callback_ = true;
+
+ result_ = result;
+ wait_for_result_ = false;
+ }
+
+ void ReceiveMidiData(uint32 port_index, const uint8* data, size_t size,
+ double timestamp) override {}
+ void AccumulateMidiBytesSent(size_t size) override {}
+
+ bool GetWaitForResult() {
+ base::AutoLock lock(lock_);
+ return wait_for_result_;
+ }
+
+ bool GetWaitForPort() {
+ base::AutoLock lock(lock_);
+ return wait_for_port_;
+ }
+
+ MidiResult WaitForResult() {
+ while (GetWaitForResult()) {
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+ EXPECT_FALSE(unexpected_callback_);
+ return result_;
+ }
+ MidiPortInfo WaitForPort() {
+ while (GetWaitForPort()) {
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+ EXPECT_FALSE(unexpected_callback_);
+ return info_;
+ }
+
+ private:
+ base::Lock lock_;
+ MidiResult result_;
+ bool wait_for_result_;
+ MidiPortInfo info_;
+ bool wait_for_port_;
+ bool unexpected_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeMidiManagerClient);
+};
+
+class MidiManagerMacTest : public ::testing::Test {
+ public:
+ MidiManagerMacTest()
+ : manager_(new MidiManagerMac),
+ message_loop_(new base::MessageLoop) {}
+
+ protected:
+ void StartSession(MidiManagerClient* client) {
+ manager_->StartSession(client);
+ }
+ void EndSession(MidiManagerClient* client) {
+ manager_->EndSession(client);
+ }
+
+ private:
+ scoped_ptr<MidiManager> manager_;
+ scoped_ptr<base::MessageLoop> message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerMacTest);
+};
+
+
+TEST_F(MidiManagerMacTest, MidiNotification) {
+ scoped_ptr<FakeMidiManagerClient> client(new FakeMidiManagerClient);
+ StartSession(client.get());
+
+ MidiResult result = client->WaitForResult();
+ EXPECT_EQ(MIDI_OK, result);
+
+ // Create MIDIClient, and MIDIEndpoint as a MIDIDestination. This should
+ // notify MIDIManagerMac as a MIDIObjectAddRemoveNotification.
+ MIDIClientRef midi_client = 0;
+ OSStatus status = MIDIClientCreate(
+ CFSTR("MidiManagerMacTest"), nullptr, nullptr, &midi_client);
+ EXPECT_EQ(noErr, status);
+
+ MIDIEndpointRef ep = 0;
+ status = MIDIDestinationCreate(
+ midi_client, CFSTR("DestinationTest"), Noop, nullptr, &ep);
+ EXPECT_EQ(noErr, status);
+ SInt32 id;
+ status = MIDIObjectGetIntegerProperty(ep, kMIDIPropertyUniqueID, &id);
+ EXPECT_EQ(noErr, status);
+ EXPECT_NE(0, id);
+
+ // Wait until the created device is notified to MidiManagerMac.
+ MidiPortInfo info = client->WaitForPort();
+ EXPECT_EQ("DestinationTest", info.name);
+
+ EndSession(client.get());
+ if (ep)
+ MIDIEndpointDispose(ep);
+ if (midi_client)
+ MIDIClientDispose(midi_client);
+}
+
+} // namespace
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_manager_unittest.cc b/chromium/media/midi/midi_manager_unittest.cc
index 40d9152b712..1b6f638ee7b 100644
--- a/chromium/media/midi/midi_manager_unittest.cc
+++ b/chromium/media/midi/midi_manager_unittest.cc
@@ -12,9 +12,11 @@
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/system_monitor/system_monitor.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
@@ -62,6 +64,8 @@ class FakeMidiManagerClient : public MidiManagerClient {
// MidiManagerClient implementation.
void AddInputPort(const MidiPortInfo& info) override {}
void AddOutputPort(const MidiPortInfo& info) override {}
+ void SetInputPortState(uint32 port_index, MidiPortState state) override {}
+ void SetOutputPortState(uint32 port_index, MidiPortState state) override {}
void CompleteStartSession(MidiResult result) override {
EXPECT_TRUE(wait_for_result_);
@@ -243,6 +247,9 @@ TEST_F(MidiManagerTest, AbortSession) {
}
TEST_F(MidiManagerTest, CreateMidiManager) {
+ // SystemMonitor is needed on Windows.
+ base::SystemMonitor system_monitor;
+
scoped_ptr<FakeMidiManagerClient> client;
client.reset(new FakeMidiManagerClient);
@@ -252,8 +259,8 @@ TEST_F(MidiManagerTest, CreateMidiManager) {
MidiResult result = client->WaitForResult();
// This #ifdef needs to be identical to the one in media/midi/midi_manager.cc.
// Do not change the condition for disabling this test.
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(USE_ALSA) && \
- !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && \
+ !(defined(USE_ALSA) && defined(USE_UDEV)) && !defined(OS_ANDROID)
EXPECT_EQ(MIDI_NOT_SUPPORTED, result);
#elif defined(USE_ALSA)
// Temporary until http://crbug.com/371230 is resolved.
@@ -265,4 +272,5 @@ TEST_F(MidiManagerTest, CreateMidiManager) {
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_usb.cc b/chromium/media/midi/midi_manager_usb.cc
index fcefdf5a522..3e096d233c7 100644
--- a/chromium/media/midi/midi_manager_usb.cc
+++ b/chromium/media/midi/midi_manager_usb.cc
@@ -4,18 +4,14 @@
#include "media/midi/midi_manager_usb.h"
-#include "base/callback.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
+#include "media/midi/midi_scheduler.h"
#include "media/midi/usb_midi_descriptor_parser.h"
-#include "media/midi/usb_midi_device.h"
-#include "media/midi/usb_midi_input_stream.h"
-#include "media/midi/usb_midi_jack.h"
-#include "media/midi/usb_midi_output_stream.h"
namespace media {
+namespace midi {
MidiManagerUsb::MidiManagerUsb(scoped_ptr<UsbMidiDevice::Factory> factory)
: device_factory_(factory.Pass()) {
@@ -32,6 +28,7 @@ void MidiManagerUsb::StartInitialization() {
void MidiManagerUsb::Initialize(
base::Callback<void(MidiResult result)> callback) {
initialize_callback_ = callback;
+ scheduler_.reset(new MidiScheduler(this));
// This is safe because EnumerateDevices cancels the operation on destruction.
device_factory_->EnumerateDevices(
this,
@@ -43,9 +40,18 @@ void MidiManagerUsb::DispatchSendMidiData(MidiManagerClient* client,
uint32_t port_index,
const std::vector<uint8>& data,
double timestamp) {
- DCHECK_LT(port_index, output_streams_.size());
- output_streams_[port_index]->Send(data);
- client->AccumulateMidiBytesSent(data.size());
+ if (port_index >= output_streams_.size()) {
+    // |port_index| is provided by a renderer so we can't trust that it is
+    // in the valid range.
+ return;
+ }
+ // output_streams_[port_index] is alive unless MidiManagerUsb is deleted.
+ // The task posted to the MidiScheduler will be disposed safely on deleting
+ // the scheduler.
+ scheduler_->PostSendDataTask(
+ client, data.size(), timestamp,
+ base::Bind(&UsbMidiOutputStream::Send,
+ base::Unretained(output_streams_[port_index]), data));
}
void MidiManagerUsb::ReceiveUsbMidiData(UsbMidiDevice* device,
@@ -62,11 +68,35 @@ void MidiManagerUsb::ReceiveUsbMidiData(UsbMidiDevice* device,
time);
}
+void MidiManagerUsb::OnDeviceAttached(scoped_ptr<UsbMidiDevice> device) {
+ int device_id = static_cast<int>(devices_.size());
+ devices_.push_back(device.Pass());
+ AddPorts(devices_.back(), device_id);
+}
+
+void MidiManagerUsb::OnDeviceDetached(size_t index) {
+ if (index >= devices_.size()) {
+ return;
+ }
+ UsbMidiDevice* device = devices_[index];
+ for (size_t i = 0; i < output_streams_.size(); ++i) {
+ if (output_streams_[i]->jack().device == device) {
+ SetOutputPortState(static_cast<uint32>(i), MIDI_PORT_DISCONNECTED);
+ }
+ }
+ const std::vector<UsbMidiJack>& input_jacks = input_stream_->jacks();
+ for (size_t i = 0; i < input_jacks.size(); ++i) {
+ if (input_jacks[i].device == device) {
+ SetInputPortState(static_cast<uint32>(i), MIDI_PORT_DISCONNECTED);
+ }
+ }
+}
+
void MidiManagerUsb::OnReceivedData(size_t jack_index,
const uint8* data,
size_t size,
base::TimeTicks time) {
- ReceiveMidiData(jack_index, data, size, time);
+ ReceiveMidiData(static_cast<uint32>(jack_index), data, size, time);
}
@@ -76,48 +106,52 @@ void MidiManagerUsb::OnEnumerateDevicesDone(bool result,
initialize_callback_.Run(MIDI_INITIALIZATION_ERROR);
return;
}
+ input_stream_.reset(new UsbMidiInputStream(this));
devices->swap(devices_);
for (size_t i = 0; i < devices_.size(); ++i) {
- UsbMidiDescriptorParser parser;
- std::vector<uint8> descriptor = devices_[i]->GetDescriptor();
- const uint8* data = descriptor.size() > 0 ? &descriptor[0] : NULL;
- std::vector<UsbMidiJack> jacks;
- bool parse_result = parser.Parse(devices_[i],
- data,
- descriptor.size(),
- &jacks);
- if (!parse_result) {
+ if (!AddPorts(devices_[i], static_cast<int>(i))) {
initialize_callback_.Run(MIDI_INITIALIZATION_ERROR);
return;
}
- std::vector<UsbMidiJack> input_jacks;
- for (size_t j = 0; j < jacks.size(); ++j) {
- if (jacks[j].direction() == UsbMidiJack::DIRECTION_OUT) {
- output_streams_.push_back(new UsbMidiOutputStream(jacks[j]));
- // TODO(yhirano): Set appropriate properties.
- // TODO(yhiran): Port ID should contain product ID / vendor ID.
- // Port ID must be unique in a MIDI manager. This (and the below) ID
- // setting is sufficiently unique although there is no user-friendly
- // meaning.
- MidiPortInfo port;
- port.id = base::StringPrintf("port-%ld-%ld",
- static_cast<long>(i),
- static_cast<long>(j));
- AddOutputPort(port);
- } else {
- DCHECK_EQ(jacks[j].direction(), UsbMidiJack::DIRECTION_IN);
- input_jacks.push_back(jacks[j]);
- // TODO(yhirano): Set appropriate properties.
- MidiPortInfo port;
- port.id = base::StringPrintf("port-%ld-%ld",
- static_cast<long>(i),
- static_cast<long>(j));
- AddInputPort(port);
- }
- }
- input_stream_.reset(new UsbMidiInputStream(input_jacks, this));
}
initialize_callback_.Run(MIDI_OK);
}
+bool MidiManagerUsb::AddPorts(UsbMidiDevice* device, int device_id) {
+ UsbMidiDescriptorParser parser;
+ std::vector<uint8> descriptor = device->GetDescriptors();
+ const uint8* data = descriptor.size() > 0 ? &descriptor[0] : NULL;
+ std::vector<UsbMidiJack> jacks;
+ bool parse_result = parser.Parse(device,
+ data,
+ descriptor.size(),
+ &jacks);
+ if (!parse_result)
+ return false;
+
+ std::string manufacturer(device->GetManufacturer());
+ std::string product_name(device->GetProductName());
+ std::string version(device->GetDeviceVersion());
+
+ for (size_t j = 0; j < jacks.size(); ++j) {
+ // Port ID must be unique in a MIDI manager. This ID setting is
+ // sufficiently unique although there is no user-friendly meaning.
+ // TODO(yhirano): Use a hashed string as ID.
+ std::string id(
+ base::StringPrintf("port-%d-%ld", device_id, static_cast<long>(j)));
+ if (jacks[j].direction() == UsbMidiJack::DIRECTION_OUT) {
+ output_streams_.push_back(new UsbMidiOutputStream(jacks[j]));
+ AddOutputPort(MidiPortInfo(id, manufacturer, product_name, version,
+ MIDI_PORT_OPENED));
+ } else {
+ DCHECK_EQ(jacks[j].direction(), UsbMidiJack::DIRECTION_IN);
+ input_stream_->Add(jacks[j]);
+ AddInputPort(MidiPortInfo(id, manufacturer, product_name, version,
+ MIDI_PORT_OPENED));
+ }
+ }
+ return true;
+}
+
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_usb.h b/chromium/media/midi/midi_manager_usb.h
index d15a82831ba..f5316bbed5e 100644
--- a/chromium/media/midi/midi_manager_usb.h
+++ b/chromium/media/midi/midi_manager_usb.h
@@ -11,22 +11,27 @@
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/callback.h"
+#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
-#include "media/base/media_export.h"
#include "media/midi/midi_manager.h"
#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_export.h"
#include "media/midi/usb_midi_input_stream.h"
#include "media/midi/usb_midi_jack.h"
#include "media/midi/usb_midi_output_stream.h"
namespace media {
+namespace midi {
+
+class MidiScheduler;
// MidiManager for USB-MIDI.
-class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
- public UsbMidiDeviceDelegate,
- public UsbMidiInputStream::Delegate {
+class USB_MIDI_EXPORT MidiManagerUsb
+ : public MidiManager,
+ public UsbMidiDeviceDelegate,
+ NON_EXPORTED_BASE(public UsbMidiInputStream::Delegate) {
public:
explicit MidiManagerUsb(scoped_ptr<UsbMidiDevice::Factory> device_factory);
~MidiManagerUsb() override;
@@ -44,6 +49,8 @@ class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
const uint8* data,
size_t size,
base::TimeTicks time) override;
+ void OnDeviceAttached(scoped_ptr<UsbMidiDevice> device) override;
+ void OnDeviceDetached(size_t index) override;
// UsbMidiInputStream::Delegate implementation.
void OnReceivedData(size_t jack_index,
@@ -67,6 +74,7 @@ class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
private:
void OnEnumerateDevicesDone(bool result, UsbMidiDevice::Devices* devices);
+ bool AddPorts(UsbMidiDevice* device, int device_id);
scoped_ptr<UsbMidiDevice::Factory> device_factory_;
ScopedVector<UsbMidiDevice> devices_;
@@ -78,9 +86,12 @@ class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
// A map from <endpoint_number, cable_number> to the index of input jacks.
base::hash_map<std::pair<int, int>, size_t> input_jack_dictionary_;
+ scoped_ptr<MidiScheduler> scheduler_;
+
DISALLOW_COPY_AND_ASSIGN(MidiManagerUsb);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MANAGER_USB_H_
diff --git a/chromium/media/midi/midi_manager_usb_unittest.cc b/chromium/media/midi/midi_manager_usb_unittest.cc
index 4df64a62dcb..f43e93cf31f 100644
--- a/chromium/media/midi/midi_manager_usb_unittest.cc
+++ b/chromium/media/midi/midi_manager_usb_unittest.cc
@@ -14,6 +14,7 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
@@ -45,11 +46,15 @@ class FakeUsbMidiDevice : public UsbMidiDevice {
explicit FakeUsbMidiDevice(Logger* logger) : logger_(logger) {}
~FakeUsbMidiDevice() override {}
- std::vector<uint8> GetDescriptor() override {
- logger_->AddLog("UsbMidiDevice::GetDescriptor\n");
- return descriptor_;
+ std::vector<uint8> GetDescriptors() override {
+ logger_->AddLog("UsbMidiDevice::GetDescriptors\n");
+ return descriptors_;
}
+ std::string GetManufacturer() override { return manufacturer_; }
+ std::string GetProductName() override { return product_name_; }
+ std::string GetDeviceVersion() override { return device_version_; }
+
void Send(int endpoint_number, const std::vector<uint8>& data) override {
logger_->AddLog("UsbMidiDevice::Send ");
logger_->AddLog(base::StringPrintf("endpoint = %d data =",
@@ -59,12 +64,24 @@ class FakeUsbMidiDevice : public UsbMidiDevice {
logger_->AddLog("\n");
}
- void SetDescriptor(const std::vector<uint8> descriptor) {
- descriptor_ = descriptor;
+ void SetDescriptors(const std::vector<uint8> descriptors) {
+ descriptors_ = descriptors;
+ }
+ void SetManufacturer(const std::string& manufacturer) {
+ manufacturer_ = manufacturer;
+ }
+ void SetProductName(const std::string& product_name) {
+ product_name_ = product_name;
+ }
+ void SetDeviceVersion(const std::string& device_version) {
+ device_version_ = device_version;
}
private:
- std::vector<uint8> descriptor_;
+ std::vector<uint8> descriptors_;
+ std::string manufacturer_;
+ std::string product_name_;
+ std::string device_version_;
Logger* logger_;
DISALLOW_COPY_AND_ASSIGN(FakeUsbMidiDevice);
@@ -86,6 +103,10 @@ class FakeMidiManagerClient : public MidiManagerClient {
output_ports_.push_back(info);
}
+ void SetInputPortState(uint32 port_index, MidiPortState state) override {}
+
+ void SetOutputPortState(uint32 port_index, MidiPortState state) override {}
+
void CompleteStartSession(MidiResult result) override {
complete_start_session_ = true;
result_ = result;
@@ -211,7 +232,7 @@ class MidiManagerUsbTest : public ::testing::Test {
TEST_F(MidiManagerUsbTest, Initialize) {
scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
- uint8 descriptor[] = {
+ uint8 descriptors[] = {
0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
@@ -227,27 +248,119 @@ TEST_F(MidiManagerUsbTest, Initialize) {
0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
0x05, 0x25, 0x01, 0x01, 0x07,
};
- device->SetDescriptor(ToVector(descriptor));
+ device->SetDescriptors(ToVector(descriptors));
+ device->SetManufacturer("vendor1");
+ device->SetProductName("device1");
+ device->SetDeviceVersion("1.02");
Initialize();
ScopedVector<UsbMidiDevice> devices;
- devices.push_back(device.release());
+ devices.push_back(device.Pass());
EXPECT_FALSE(IsInitializationCallbackInvoked());
RunCallbackUntilCallbackInvoked(true, &devices);
EXPECT_EQ(MIDI_OK, GetInitializationResult());
ASSERT_EQ(1u, input_ports().size());
+ EXPECT_EQ("port-0-2", input_ports()[0].id);
+ EXPECT_EQ("vendor1", input_ports()[0].manufacturer);
+ EXPECT_EQ("device1", input_ports()[0].name);
+ EXPECT_EQ("1.02", input_ports()[0].version);
+
ASSERT_EQ(2u, output_ports().size());
+ EXPECT_EQ("port-0-0", output_ports()[0].id);
+ EXPECT_EQ("vendor1", output_ports()[0].manufacturer);
+ EXPECT_EQ("device1", output_ports()[0].name);
+ EXPECT_EQ("1.02", output_ports()[0].version);
+ EXPECT_EQ("port-0-1", output_ports()[1].id);
+ EXPECT_EQ("vendor1", output_ports()[1].manufacturer);
+ EXPECT_EQ("device1", output_ports()[1].name);
+ EXPECT_EQ("1.02", output_ports()[1].version);
+
ASSERT_TRUE(manager_->input_stream());
- std::vector<UsbMidiInputStream::JackUniqueKey> keys =
- manager_->input_stream()->RegisteredJackKeysForTesting();
+ std::vector<UsbMidiJack> jacks = manager_->input_stream()->jacks();
ASSERT_EQ(2u, manager_->output_streams().size());
EXPECT_EQ(2u, manager_->output_streams()[0]->jack().jack_id);
EXPECT_EQ(3u, manager_->output_streams()[1]->jack().jack_id);
- ASSERT_EQ(1u, keys.size());
- EXPECT_EQ(2, keys[0].endpoint_number);
+ ASSERT_EQ(1u, jacks.size());
+ EXPECT_EQ(2, jacks[0].endpoint_number());
+
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n", logger_.TakeLog());
+}
+
+TEST_F(MidiManagerUsbTest, InitializeMultipleDevices) {
+ scoped_ptr<FakeUsbMidiDevice> device1(new FakeUsbMidiDevice(&logger_));
+ scoped_ptr<FakeUsbMidiDevice> device2(new FakeUsbMidiDevice(&logger_));
+ uint8 descriptors[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a, 0x2d, 0x75,
+ 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02, 0x75, 0x00, 0x02, 0x01,
+ 0x00, 0x80, 0x30, 0x09, 0x04, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00,
+ 0x09, 0x24, 0x01, 0x00, 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01,
+ 0x00, 0x02, 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02, 0x01, 0x03,
+ 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09, 0x24, 0x03, 0x01, 0x07,
+ 0x01, 0x06, 0x01, 0x00, 0x09, 0x24, 0x03, 0x02, 0x04, 0x01, 0x02, 0x01,
+ 0x00, 0x09, 0x24, 0x03, 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05,
+ 0x02, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x05, 0x25,
+ 0x01, 0x01, 0x07,
+ };
+ device1->SetDescriptors(ToVector(descriptors));
+ device1->SetManufacturer("vendor1");
+ device1->SetProductName("device1");
+ device1->SetDeviceVersion("1.02");
+ device2->SetDescriptors(ToVector(descriptors));
+ device2->SetManufacturer("vendor2");
+ device2->SetProductName("device2");
+ device2->SetDeviceVersion("98.76");
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ devices.push_back(device1.Pass());
+ devices.push_back(device2.Pass());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+
+ ASSERT_EQ(2u, input_ports().size());
+ EXPECT_EQ("port-0-2", input_ports()[0].id);
+ EXPECT_EQ("vendor1", input_ports()[0].manufacturer);
+ EXPECT_EQ("device1", input_ports()[0].name);
+ EXPECT_EQ("1.02", input_ports()[0].version);
+ EXPECT_EQ("port-1-2", input_ports()[1].id);
+ EXPECT_EQ("vendor2", input_ports()[1].manufacturer);
+ EXPECT_EQ("device2", input_ports()[1].name);
+ EXPECT_EQ("98.76", input_ports()[1].version);
+
+ ASSERT_EQ(4u, output_ports().size());
+ EXPECT_EQ("port-0-0", output_ports()[0].id);
+ EXPECT_EQ("vendor1", output_ports()[0].manufacturer);
+ EXPECT_EQ("device1", output_ports()[0].name);
+ EXPECT_EQ("1.02", output_ports()[0].version);
+ EXPECT_EQ("port-0-1", output_ports()[1].id);
+ EXPECT_EQ("vendor1", output_ports()[1].manufacturer);
+ EXPECT_EQ("device1", output_ports()[1].name);
+ EXPECT_EQ("1.02", output_ports()[1].version);
+ EXPECT_EQ("port-1-0", output_ports()[2].id);
+ EXPECT_EQ("vendor2", output_ports()[2].manufacturer);
+ EXPECT_EQ("device2", output_ports()[2].name);
+ EXPECT_EQ("98.76", output_ports()[2].version);
+ EXPECT_EQ("port-1-1", output_ports()[3].id);
+ EXPECT_EQ("vendor2", output_ports()[3].manufacturer);
+ EXPECT_EQ("device2", output_ports()[3].name);
+ EXPECT_EQ("98.76", output_ports()[3].version);
+
+ ASSERT_TRUE(manager_->input_stream());
+ std::vector<UsbMidiJack> jacks = manager_->input_stream()->jacks();
+ ASSERT_EQ(4u, manager_->output_streams().size());
+ EXPECT_EQ(2u, manager_->output_streams()[0]->jack().jack_id);
+ EXPECT_EQ(3u, manager_->output_streams()[1]->jack().jack_id);
+ ASSERT_EQ(2u, jacks.size());
+ EXPECT_EQ(2, jacks[0].endpoint_number());
- EXPECT_EQ("UsbMidiDevice::GetDescriptor\n", logger_.TakeLog());
+ EXPECT_EQ(
+ "UsbMidiDevice::GetDescriptors\n"
+ "UsbMidiDevice::GetDescriptors\n",
+ logger_.TakeLog());
}
TEST_F(MidiManagerUsbTest, InitializeFail) {
@@ -258,24 +371,24 @@ TEST_F(MidiManagerUsbTest, InitializeFail) {
EXPECT_EQ(MIDI_INITIALIZATION_ERROR, GetInitializationResult());
}
-TEST_F(MidiManagerUsbTest, InitializeFailBecauseOfInvalidDescriptor) {
+TEST_F(MidiManagerUsbTest, InitializeFailBecauseOfInvalidDescriptors) {
scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
- uint8 descriptor[] = {0x04};
- device->SetDescriptor(ToVector(descriptor));
+ uint8 descriptors[] = {0x04};
+ device->SetDescriptors(ToVector(descriptors));
Initialize();
ScopedVector<UsbMidiDevice> devices;
- devices.push_back(device.release());
+ devices.push_back(device.Pass());
EXPECT_FALSE(IsInitializationCallbackInvoked());
RunCallbackUntilCallbackInvoked(true, &devices);
EXPECT_EQ(MIDI_INITIALIZATION_ERROR, GetInitializationResult());
- EXPECT_EQ("UsbMidiDevice::GetDescriptor\n", logger_.TakeLog());
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n", logger_.TakeLog());
}
TEST_F(MidiManagerUsbTest, Send) {
+ Initialize();
scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
- FakeMidiManagerClient client(&logger_);
- uint8 descriptor[] = {
+ uint8 descriptors[] = {
0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
@@ -292,22 +405,25 @@ TEST_F(MidiManagerUsbTest, Send) {
0x05, 0x25, 0x01, 0x01, 0x07,
};
- device->SetDescriptor(ToVector(descriptor));
+ device->SetDescriptors(ToVector(descriptors));
uint8 data[] = {
0x90, 0x45, 0x7f,
0xf0, 0x00, 0x01, 0xf7,
};
- Initialize();
ScopedVector<UsbMidiDevice> devices;
- devices.push_back(device.release());
+ devices.push_back(device.Pass());
EXPECT_FALSE(IsInitializationCallbackInvoked());
RunCallbackUntilCallbackInvoked(true, &devices);
EXPECT_EQ(MIDI_OK, GetInitializationResult());
ASSERT_EQ(2u, manager_->output_streams().size());
- manager_->DispatchSendMidiData(&client, 1, ToVector(data), 0);
- EXPECT_EQ("UsbMidiDevice::GetDescriptor\n"
+ manager_->DispatchSendMidiData(client_.get(), 1, ToVector(data), 0);
+ // Since UsbMidiDevice::Send is posted as a task, RunLoop should run to
+ // invoke the task.
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n"
"UsbMidiDevice::Send endpoint = 2 data = "
"0x19 0x90 0x45 0x7f "
"0x14 0xf0 0x00 0x01 "
@@ -316,9 +432,52 @@ TEST_F(MidiManagerUsbTest, Send) {
logger_.TakeLog());
}
+TEST_F(MidiManagerUsbTest, SendFromCompromizedRenderer) {
+ scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
+ uint8 descriptors[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+
+ device->SetDescriptors(ToVector(descriptors));
+ uint8 data[] = {
+ 0x90, 0x45, 0x7f,
+ 0xf0, 0x00, 0x01, 0xf7,
+ };
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ devices.push_back(device.Pass());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+ ASSERT_EQ(2u, manager_->output_streams().size());
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n", logger_.TakeLog());
+
+ // The specified port index is invalid. The manager must ignore the request.
+ manager_->DispatchSendMidiData(client_.get(), 99, ToVector(data), 0);
+ EXPECT_EQ("", logger_.TakeLog());
+
+ // The specified port index is invalid. The manager must ignore the request.
+ manager_->DispatchSendMidiData(client_.get(), 2, ToVector(data), 0);
+ EXPECT_EQ("", logger_.TakeLog());
+}
+
TEST_F(MidiManagerUsbTest, Receive) {
scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
- uint8 descriptor[] = {
+ uint8 descriptors[] = {
0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
@@ -335,7 +494,7 @@ TEST_F(MidiManagerUsbTest, Receive) {
0x05, 0x25, 0x01, 0x01, 0x07,
};
- device->SetDescriptor(ToVector(descriptor));
+ device->SetDescriptors(ToVector(descriptors));
uint8 data[] = {
0x09, 0x90, 0x45, 0x7f,
0x04, 0xf0, 0x00, 0x01,
@@ -346,7 +505,7 @@ TEST_F(MidiManagerUsbTest, Receive) {
Initialize();
ScopedVector<UsbMidiDevice> devices;
UsbMidiDevice* device_raw = device.get();
- devices.push_back(device.release());
+ devices.push_back(device.Pass());
EXPECT_FALSE(IsInitializationCallbackInvoked());
RunCallbackUntilCallbackInvoked(true, &devices);
EXPECT_EQ(MIDI_OK, GetInitializationResult());
@@ -355,7 +514,7 @@ TEST_F(MidiManagerUsbTest, Receive) {
base::TimeTicks());
Finalize();
- EXPECT_EQ("UsbMidiDevice::GetDescriptor\n"
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n"
"MidiManagerClient::ReceiveMidiData port_index = 0 "
"data = 0x90 0x45 0x7f\n"
"MidiManagerClient::ReceiveMidiData port_index = 0 "
@@ -364,6 +523,55 @@ TEST_F(MidiManagerUsbTest, Receive) {
logger_.TakeLog());
}
+TEST_F(MidiManagerUsbTest, AttachDevice) {
+ uint8 descriptors[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+
+ ASSERT_EQ(0u, input_ports().size());
+ ASSERT_EQ(0u, output_ports().size());
+ ASSERT_TRUE(manager_->input_stream());
+ std::vector<UsbMidiJack> jacks = manager_->input_stream()->jacks();
+ ASSERT_EQ(0u, manager_->output_streams().size());
+ ASSERT_EQ(0u, jacks.size());
+ EXPECT_EQ("", logger_.TakeLog());
+
+ scoped_ptr<FakeUsbMidiDevice> new_device(new FakeUsbMidiDevice(&logger_));
+ new_device->SetDescriptors(ToVector(descriptors));
+ manager_->OnDeviceAttached(new_device.Pass());
+
+ ASSERT_EQ(1u, input_ports().size());
+ ASSERT_EQ(2u, output_ports().size());
+ ASSERT_TRUE(manager_->input_stream());
+ jacks = manager_->input_stream()->jacks();
+ ASSERT_EQ(2u, manager_->output_streams().size());
+ EXPECT_EQ(2u, manager_->output_streams()[0]->jack().jack_id);
+ EXPECT_EQ(3u, manager_->output_streams()[1]->jack().jack_id);
+ ASSERT_EQ(1u, jacks.size());
+ EXPECT_EQ(2, jacks[0].endpoint_number());
+ EXPECT_EQ("UsbMidiDevice::GetDescriptors\n", logger_.TakeLog());
+}
+
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
index bc4b15d08d9..5c81386fc3f 100644
--- a/chromium/media/midi/midi_manager_win.cc
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -5,7 +5,9 @@
#include "media/midi/midi_manager_win.h"
#include <windows.h>
-
+#include <ks.h>
+#include <ksmedia.h>
+#include <mmreg.h>
// Prevent unnecessary functions from being included from <mmsystem.h>
#define MMNODRV
#define MMNOSOUND
@@ -19,19 +21,37 @@
#include <mmsystem.h>
#include <algorithm>
+#include <functional>
+#include <queue>
#include <string>
+
#include "base/bind.h"
+#include "base/containers/hash_tables.h"
#include "base/message_loop/message_loop.h"
+#include "base/strings/string16.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/threading/thread.h"
+#include "base/system_monitor/system_monitor.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+#include "base/win/message_window.h"
+#include "device/usb/usb_ids.h"
#include "media/midi/midi_message_queue.h"
#include "media/midi/midi_message_util.h"
#include "media/midi/midi_port_info.h"
namespace media {
+namespace midi {
namespace {
+static const size_t kBufferLength = 32 * 1024;
+
+// We assume that nullpter represents an invalid MIDI handle.
+const HMIDIIN kInvalidMidiInHandle = nullptr;
+const HMIDIOUT kInvalidMidiOutHandle = nullptr;
+
std::string GetInErrorMessage(MMRESULT result) {
wchar_t text[MAXERRORLENGTH];
MMRESULT get_result = midiInGetErrorText(result, text, arraysize(text));
@@ -56,6 +76,10 @@ std::string GetOutErrorMessage(MMRESULT result) {
return base::WideToUTF8(text);
}
+std::string MmversionToString(MMVERSION version) {
+ return base::StringPrintf("%d.%d", HIBYTE(version), LOBYTE(version));
+}
+
class MIDIHDRDeleter {
public:
void operator()(MIDIHDR* header) {
@@ -74,14 +98,14 @@ ScopedMIDIHDR CreateMIDIHDR(size_t size) {
ScopedMIDIHDR header(new MIDIHDR);
ZeroMemory(header.get(), sizeof(*header));
header->lpData = new char[size];
- header->dwBufferLength = size;
+ header->dwBufferLength = static_cast<DWORD>(size);
return header.Pass();
}
void SendShortMidiMessageInternal(HMIDIOUT midi_out_handle,
const std::vector<uint8>& message) {
- if (message.size() >= 4)
- return;
+ DCHECK_LE(message.size(), static_cast<size_t>(3))
+ << "A short MIDI message should be up to 3 bytes.";
DWORD packed_message = 0;
for (size_t i = 0; i < message.size(); ++i)
@@ -94,8 +118,9 @@ void SendShortMidiMessageInternal(HMIDIOUT midi_out_handle,
void SendLongMidiMessageInternal(HMIDIOUT midi_out_handle,
const std::vector<uint8>& message) {
// Implementation note:
- // Sending long MIDI message can be performed synchronously or asynchronously
- // depending on the driver. There are 2 options to support both cases:
+ // Sending a long MIDI message can be performed synchronously or
+ // asynchronously depending on the driver. There are 2 options to support both
+ // cases:
// 1) Call midiOutLongMsg() API and wait for its completion within this
// function. In this approach, we can avoid memory copy by directly pointing
// |message| as the data buffer to be sent.
@@ -119,24 +144,23 @@ void SendLongMidiMessageInternal(HMIDIOUT midi_out_handle,
}
ScopedMIDIHDR midi_header(CreateMIDIHDR(message.size()));
- for (size_t i = 0; i < message.size(); ++i)
- midi_header->lpData[i] = static_cast<char>(message[i]);
+ std::copy(message.begin(), message.end(), midi_header->lpData);
- MMRESULT result = midiOutPrepareHeader(
- midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ MMRESULT result = midiOutPrepareHeader(midi_out_handle, midi_header.get(),
+ sizeof(*midi_header));
if (result != MMSYSERR_NOERROR) {
DLOG(ERROR) << "Failed to prepare output buffer: "
<< GetOutErrorMessage(result);
return;
}
- result = midiOutLongMsg(
- midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ result =
+ midiOutLongMsg(midi_out_handle, midi_header.get(), sizeof(*midi_header));
if (result != MMSYSERR_NOERROR) {
DLOG(ERROR) << "Failed to output long message: "
<< GetOutErrorMessage(result);
- result = midiOutUnprepareHeader(
- midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ result = midiOutUnprepareHeader(midi_out_handle, midi_header.get(),
+ sizeof(*midi_header));
DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
<< "Failed to uninitialize output buffer: "
<< GetOutErrorMessage(result);
@@ -147,452 +171,997 @@ void SendLongMidiMessageInternal(HMIDIOUT midi_out_handle,
midi_header.release();
}
-} // namespace
+template <size_t array_size>
+base::string16 AsString16(const wchar_t(&buffer)[array_size]) {
+ size_t len = 0;
+ for (len = 0; len < array_size; ++len) {
+ if (buffer[len] == L'\0')
+ break;
+ }
+ return base::string16(buffer, len);
+}
-class MidiManagerWin::InDeviceInfo {
- public:
- ~InDeviceInfo() {
- Uninitialize();
+struct MidiDeviceInfo final {
+ explicit MidiDeviceInfo(const MIDIINCAPS2W& caps)
+ : manufacturer_id(caps.wMid),
+ product_id(caps.wPid),
+ driver_version(caps.vDriverVersion),
+ product_name(AsString16(caps.szPname)),
+ usb_vendor_id(ExtractUsbVendorIdIfExists(caps)),
+ usb_product_id(ExtractUsbProductIdIfExists(caps)),
+ is_usb_device(IsUsbDevice(caps)) {}
+ explicit MidiDeviceInfo(const MIDIOUTCAPS2W& caps)
+ : manufacturer_id(caps.wMid),
+ product_id(caps.wPid),
+ driver_version(caps.vDriverVersion),
+ product_name(AsString16(caps.szPname)),
+ usb_vendor_id(ExtractUsbVendorIdIfExists(caps)),
+ usb_product_id(ExtractUsbProductIdIfExists(caps)),
+ is_usb_device(IsUsbDevice(caps)) {}
+ explicit MidiDeviceInfo(const MidiDeviceInfo& info)
+ : manufacturer_id(info.manufacturer_id),
+ product_id(info.product_id),
+ driver_version(info.driver_version),
+ product_name(info.product_name),
+ usb_vendor_id(info.usb_vendor_id),
+ usb_product_id(info.usb_product_id),
+ is_usb_device(info.is_usb_device) {}
+ // Currently only following entities are considered when testing the equality
+ // of two MIDI devices.
+ // TODO(toyoshim): Consider to calculate MIDIPort.id here and use it as the
+ // key. See crbug.com/467448. Then optimize the data for |MidiPortInfo|.
+ const uint16 manufacturer_id;
+ const uint16 product_id;
+ const uint32 driver_version;
+ const base::string16 product_name;
+ const uint16 usb_vendor_id;
+ const uint16 usb_product_id;
+ const bool is_usb_device;
+
+ // Required to be used as the key of base::hash_map.
+ bool operator==(const MidiDeviceInfo& that) const {
+ return manufacturer_id == that.manufacturer_id &&
+ product_id == that.product_id &&
+ driver_version == that.driver_version &&
+ product_name == that.product_name &&
+ is_usb_device == that.is_usb_device &&
+ (is_usb_device && usb_vendor_id == that.usb_vendor_id &&
+ usb_product_id == that.usb_product_id);
+ }
+
+ // Hash function to be used in base::hash_map.
+ struct Hasher {
+ size_t operator()(const MidiDeviceInfo& info) const {
+ size_t hash = info.manufacturer_id;
+ hash *= 131;
+ hash += info.product_id;
+ hash *= 131;
+ hash += info.driver_version;
+ hash *= 131;
+ hash += info.product_name.size();
+ hash *= 131;
+ if (!info.product_name.empty()) {
+ hash += info.product_name[0];
+ }
+ hash *= 131;
+ hash += info.usb_vendor_id;
+ hash *= 131;
+ hash += info.usb_product_id;
+ return hash;
+ }
+ };
+
+ private:
+ static bool IsUsbDevice(const MIDIINCAPS2W& caps) {
+ return IS_COMPATIBLE_USBAUDIO_MID(&caps.ManufacturerGuid) &&
+ IS_COMPATIBLE_USBAUDIO_PID(&caps.ProductGuid);
+ }
+ static bool IsUsbDevice(const MIDIOUTCAPS2W& caps) {
+ return IS_COMPATIBLE_USBAUDIO_MID(&caps.ManufacturerGuid) &&
+ IS_COMPATIBLE_USBAUDIO_PID(&caps.ProductGuid);
+ }
+ static uint16 ExtractUsbVendorIdIfExists(const MIDIINCAPS2W& caps) {
+ if (!IS_COMPATIBLE_USBAUDIO_MID(&caps.ManufacturerGuid))
+ return 0;
+ return EXTRACT_USBAUDIO_MID(&caps.ManufacturerGuid);
}
- void set_port_index(int index) {
- port_index_ = index;
+ static uint16 ExtractUsbVendorIdIfExists(const MIDIOUTCAPS2W& caps) {
+ if (!IS_COMPATIBLE_USBAUDIO_MID(&caps.ManufacturerGuid))
+ return 0;
+ return EXTRACT_USBAUDIO_MID(&caps.ManufacturerGuid);
}
- int port_index() const {
- return port_index_;
+ static uint16 ExtractUsbProductIdIfExists(const MIDIINCAPS2W& caps) {
+ if (!IS_COMPATIBLE_USBAUDIO_PID(&caps.ProductGuid))
+ return 0;
+ return EXTRACT_USBAUDIO_PID(&caps.ProductGuid);
}
- bool device_to_be_closed() const {
- return device_to_be_closed_;
+ static uint16 ExtractUsbProductIdIfExists(const MIDIOUTCAPS2W& caps) {
+ if (!IS_COMPATIBLE_USBAUDIO_PID(&caps.ProductGuid))
+ return 0;
+ return EXTRACT_USBAUDIO_PID(&caps.ProductGuid);
}
- HMIDIIN midi_handle() const {
- return midi_handle_;
+};
+
+std::string GetManufacturerName(const MidiDeviceInfo& info) {
+ if (info.is_usb_device) {
+ const char* name = device::UsbIds::GetVendorName(info.usb_vendor_id);
+ return std::string(name ? name : "");
}
- static scoped_ptr<InDeviceInfo> Create(MidiManagerWin* manager,
- UINT device_id) {
- scoped_ptr<InDeviceInfo> obj(new InDeviceInfo(manager));
- if (!obj->Initialize(device_id))
- obj.reset();
- return obj.Pass();
+ switch (info.manufacturer_id) {
+ case MM_MICROSOFT:
+ return "Microsoft Corporation";
+ default:
+ // TODO(toyoshim): Support other manufacture IDs. crbug.com/472341.
+ return "";
}
+}
+
+using PortNumberCache = base::hash_map<
+ MidiDeviceInfo,
+ std::priority_queue<uint32, std::vector<uint32>, std::greater<uint32>>,
+ MidiDeviceInfo::Hasher>;
+
+struct MidiInputDeviceState final : base::RefCounted<MidiInputDeviceState> {
+ explicit MidiInputDeviceState(const MidiDeviceInfo& device_info)
+ : device_info(device_info),
+ midi_handle(kInvalidMidiInHandle),
+ port_index(0),
+ port_age(0),
+ start_time_initialized(false) {}
+
+ const MidiDeviceInfo device_info;
+ HMIDIIN midi_handle;
+ ScopedMIDIHDR midi_header;
+ // Since Win32 multimedia system uses a relative time offset from when
+ // |midiInStart| API is called, we need to record when it is called.
+ base::TimeTicks start_time;
+ // 0-based port index. We will try to reuse the previous port index when the
+ // MIDI device is closed then reopened.
+ uint32 port_index;
+ // A sequence number which represents how many times |port_index| is reused.
+ // We can remove this field if we decide not to clear unsent events
+ // when the device is disconnected.
+ // See https://github.com/WebAudio/web-midi-api/issues/133
+ uint64 port_age;
+ // True if |start_time| is initialized. This field is not used so far, but
+ // kept for the debugging purpose.
+ bool start_time_initialized;
private:
- static const int kInvalidPortIndex = -1;
- static const size_t kBufferLength = 32 * 1024;
-
- explicit InDeviceInfo(MidiManagerWin* manager)
- : manager_(manager),
- port_index_(kInvalidPortIndex),
- midi_handle_(NULL),
- started_(false),
- device_to_be_closed_(false) {
- }
-
- bool Initialize(DWORD device_id) {
- Uninitialize();
- midi_header_ = CreateMIDIHDR(kBufferLength);
-
- // Here we use |CALLBACK_FUNCTION| to subscribe MIM_DATA, MIM_LONGDATA, and
- // MIM_CLOSE events.
- // - MIM_DATA: This is the only way to get a short MIDI message with
- // timestamp information.
- // - MIM_LONGDATA: This is the only way to get a long MIDI message with
- // timestamp information.
- // - MIM_CLOSE: This event is sent when 1) midiInClose() is called, or 2)
- // the MIDI device becomes unavailable for some reasons, e.g., the cable
- // is disconnected. As for the former case, HMIDIOUT will be invalidated
- // soon after the callback is finished. As for the later case, however,
- // HMIDIOUT continues to be valid until midiInClose() is called.
- MMRESULT result = midiInOpen(&midi_handle_,
- device_id,
- reinterpret_cast<DWORD_PTR>(&HandleMessage),
- reinterpret_cast<DWORD_PTR>(this),
- CALLBACK_FUNCTION);
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to open output device. "
- << " id: " << device_id
- << " message: " << GetInErrorMessage(result);
- return false;
+ friend class base::RefCounted<MidiInputDeviceState>;
+ ~MidiInputDeviceState() {}
+};
+
+struct MidiOutputDeviceState final : base::RefCounted<MidiOutputDeviceState> {
+ explicit MidiOutputDeviceState(const MidiDeviceInfo& device_info)
+ : device_info(device_info),
+ midi_handle(kInvalidMidiOutHandle),
+ port_index(0),
+ port_age(0),
+ closed(false) {}
+
+ const MidiDeviceInfo device_info;
+ HMIDIOUT midi_handle;
+ // 0-based port index. We will try to reuse the previous port index when the
+ // MIDI device is closed then reopened.
+ uint32 port_index;
+ // A sequence number which represents how many times |port_index| is reused.
+ // We can remove this field if we decide not to clear unsent events
+ // when the device is disconnected.
+ // See https://github.com/WebAudio/web-midi-api/issues/133
+ uint64 port_age;
+ // True if the device is already closed and |midi_handle| is considered to be
+ // invalid.
+ // TODO(toyoshim): Use std::atomic<bool> when it is allowed in Chromium
+ // project.
+ volatile bool closed;
+
+ private:
+ friend class base::RefCounted<MidiOutputDeviceState>;
+ ~MidiOutputDeviceState() {}
+};
+
+// The core logic of MIDI device handling for Windows. Basically this class is
+// shared among following 4 threads:
+// 1. Chrome IO Thread
+// 2. OS Multimedia Thread
+// 3. Task Thread
+// 4. Sender Thread
+//
+// Chrome IO Thread:
+// MidiManager runs on Chrome IO thread. Device change notification is
+// delivered to the thread through the SystemMonitor service.
+// OnDevicesChanged() callback is invoked to update the MIDI device list.
+// Note that in the current implementation we will try to open all the
+// existing devices in practice. This is OK because trying to reopen a MIDI
+// device that is already opened would simply fail, and there is no unwilling
+// side effect.
+//
+// OS Multimedia Thread:
+// This thread is maintained by the OS as a part of MIDI runtime, and
+// responsible for receiving all the system initiated events such as device
+// close, and receiving data. For performance reasons, most of potentially
+// blocking operations will be dispatched into Task Thread.
+//
+// Task Thread:
+// This thread will be used to call back following methods of MidiManager.
+// - MidiManager::CompleteInitialization
+// - MidiManager::AddInputPort
+// - MidiManager::AddOutputPort
+// - MidiManager::SetInputPortState
+// - MidiManager::SetOutputPortState
+// - MidiManager::ReceiveMidiData
+//
+// Sender Thread:
+// This thread will be used to call Win32 APIs to send MIDI message at the
+// specified time. We don't want to call MIDI send APIs on Task Thread
+// because those APIs could be performed synchronously, hence they could block
+// the caller thread for a while. See the comment in
+// SendLongMidiMessageInternal for details. Currently we expect that the
+// blocking time would be less than 1 second.
+class MidiServiceWinImpl : public MidiServiceWin,
+ public base::SystemMonitor::DevicesChangedObserver {
+ public:
+ MidiServiceWinImpl()
+ : delegate_(nullptr),
+ sender_thread_("Windows MIDI sender thread"),
+ task_thread_("Windows MIDI task thread"),
+ destructor_started(false) {}
+
+ ~MidiServiceWinImpl() final {
+ // Start() and Stop() of the threads, and AddDevicesChangeObserver() and
+ // RemoveDevicesChangeObserver() should be called on the same thread.
+ CHECK(thread_checker_.CalledOnValidThread());
+
+ destructor_started = true;
+ base::SystemMonitor::Get()->RemoveDevicesChangedObserver(this);
+ {
+ std::vector<HMIDIIN> input_devices;
+ {
+ base::AutoLock auto_lock(input_ports_lock_);
+ for (auto it : input_device_map_)
+ input_devices.push_back(it.first);
+ }
+ {
+ for (const auto handle : input_devices) {
+ MMRESULT result = midiInClose(handle);
+ if (result == MIDIERR_STILLPLAYING) {
+ result = midiInReset(handle);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "midiInReset failed: " << GetInErrorMessage(result);
+ result = midiInClose(handle);
+ }
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "midiInClose failed: " << GetInErrorMessage(result);
+ }
+ }
}
- result = midiInPrepareHeader(
- midi_handle_, midi_header_.get(), sizeof(*midi_header_));
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to initialize input buffer: "
- << GetInErrorMessage(result);
- return false;
+ {
+ std::vector<HMIDIOUT> output_devices;
+ {
+ base::AutoLock auto_lock(output_ports_lock_);
+ for (auto it : output_device_map_)
+ output_devices.push_back(it.first);
+ }
+ {
+ for (const auto handle : output_devices) {
+ MMRESULT result = midiOutClose(handle);
+ if (result == MIDIERR_STILLPLAYING) {
+ result = midiOutReset(handle);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "midiOutReset failed: " << GetOutErrorMessage(result);
+ result = midiOutClose(handle);
+ }
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "midiOutClose failed: " << GetOutErrorMessage(result);
+ }
+ }
}
- result = midiInAddBuffer(
- midi_handle_, midi_header_.get(), sizeof(*midi_header_));
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to attach input buffer: "
- << GetInErrorMessage(result);
- return false;
+ sender_thread_.Stop();
+ task_thread_.Stop();
+ }
+
+ // MidiServiceWin overrides:
+ void InitializeAsync(MidiServiceWinDelegate* delegate) final {
+ // Start() and Stop() of the threads, and AddDevicesChangeObserver() and
+ // RemoveDevicesChangeObserver() should be called on the same thread.
+ CHECK(thread_checker_.CalledOnValidThread());
+
+ delegate_ = delegate;
+
+ sender_thread_.Start();
+ task_thread_.Start();
+
+ // Start monitoring device changes. This should start before the
+ // following UpdateDeviceList() call not to miss the event happening
+ // between the call and the observer registration.
+ base::SystemMonitor::Get()->AddDevicesChangedObserver(this);
+
+ UpdateDeviceList();
+
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::CompleteInitializationOnTaskThread,
+ base::Unretained(this), MIDI_OK));
+ }
+
+ void SendMidiDataAsync(uint32 port_number,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) final {
+ if (destructor_started) {
+ LOG(ERROR) << "ThreadSafeSendData failed because MidiServiceWinImpl is "
+ "being destructed. port: " << port_number;
+ return;
}
- result = midiInStart(midi_handle_);
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to start input port: "
- << GetInErrorMessage(result);
- return false;
+ auto state = GetOutputDeviceFromPort(port_number);
+ if (!state) {
+ LOG(ERROR) << "ThreadSafeSendData failed due to an invalid port number. "
+ << "port: " << port_number;
+ return;
+ }
+ if (state->closed) {
+ LOG(ERROR)
+ << "ThreadSafeSendData failed because target port is already closed."
+ << "port: " << port_number;
+ return;
+ }
+ const auto now = base::TimeTicks::Now();
+ if (now < time) {
+ sender_thread_.message_loop()->PostDelayedTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::SendOnSenderThread,
+ base::Unretained(this), port_number,
+ state->port_age, data, time),
+ time - now);
+ } else {
+ sender_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::SendOnSenderThread,
+ base::Unretained(this), port_number,
+ state->port_age, data, time));
}
- started_ = true;
- start_time_ = base::TimeTicks::Now();
- return true;
}
- void Uninitialize() {
- MMRESULT result = MMSYSERR_NOERROR;
- if (midi_handle_ && started_) {
- result = midiInStop(midi_handle_);
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to stop input port: " << GetInErrorMessage(result);
- started_ = false;
- start_time_ = base::TimeTicks();
- }
- if (midi_handle_) {
- // midiInReset flushes pending messages. We ignore these messages.
- device_to_be_closed_ = true;
- result = midiInReset(midi_handle_);
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to reset input port: " << GetInErrorMessage(result);
- result = midiInClose(midi_handle_);
- device_to_be_closed_ = false;
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to close input port: " << GetInErrorMessage(result);
- midi_header_.reset();
- midi_handle_ = NULL;
- port_index_ = kInvalidPortIndex;
- }
- }
-
- static void CALLBACK HandleMessage(HMIDIIN midi_in_handle,
- UINT message,
- DWORD_PTR instance,
- DWORD_PTR param1,
- DWORD_PTR param2) {
- // This method can be called back on any thread depending on Windows
- // multimedia subsystem and underlying MIDI drivers.
- InDeviceInfo* self = reinterpret_cast<InDeviceInfo*>(instance);
- if (!self)
- return;
- if (self->midi_handle() != midi_in_handle)
+ // base::SystemMonitor::DevicesChangedObserver overrides:
+ void OnDevicesChanged(base::SystemMonitor::DeviceType device_type) final {
+ CHECK(thread_checker_.CalledOnValidThread());
+ if (destructor_started)
return;
+ switch (device_type) {
+ case base::SystemMonitor::DEVTYPE_AUDIO_CAPTURE:
+ case base::SystemMonitor::DEVTYPE_VIDEO_CAPTURE:
+ // Add case of other unrelated device types here.
+ return;
+ case base::SystemMonitor::DEVTYPE_UNKNOWN:
+ // Interested in MIDI devices. Try updating the device list.
+ UpdateDeviceList();
+ break;
+ // No default here to capture new DeviceType by compile time.
+ }
+ }
+
+ private:
+ scoped_refptr<MidiInputDeviceState> GetInputDeviceFromHandle(
+ HMIDIIN midi_handle) {
+ base::AutoLock auto_lock(input_ports_lock_);
+ const auto it = input_device_map_.find(midi_handle);
+ return (it != input_device_map_.end() ? it->second : nullptr);
+ }
+
+ scoped_refptr<MidiOutputDeviceState> GetOutputDeviceFromHandle(
+ HMIDIOUT midi_handle) {
+ base::AutoLock auto_lock(output_ports_lock_);
+ const auto it = output_device_map_.find(midi_handle);
+ return (it != output_device_map_.end() ? it->second : nullptr);
+ }
+
+ scoped_refptr<MidiOutputDeviceState> GetOutputDeviceFromPort(
+ uint32 port_number) {
+ base::AutoLock auto_lock(output_ports_lock_);
+ if (output_ports_.size() <= port_number)
+ return nullptr;
+ return output_ports_[port_number];
+ }
+
+ void UpdateDeviceList() {
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::UpdateDeviceListOnTaskThread,
+ base::Unretained(this)));
+ }
+
+ /////////////////////////////////////////////////////////////////////////////
+ // Callbacks on the OS multimedia thread.
+ /////////////////////////////////////////////////////////////////////////////
+
+ static void CALLBACK
+ OnMidiInEventOnMainlyMultimediaThread(HMIDIIN midi_in_handle,
+ UINT message,
+ DWORD_PTR instance,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ MidiServiceWinImpl* self = reinterpret_cast<MidiServiceWinImpl*>(instance);
+ if (!self)
+ return;
switch (message) {
+ case MIM_OPEN:
+ self->OnMidiInOpen(midi_in_handle);
+ break;
case MIM_DATA:
- self->OnShortMessageReceived(static_cast<uint8>(param1 & 0xff),
- static_cast<uint8>((param1 >> 8) & 0xff),
- static_cast<uint8>((param1 >> 16) & 0xff),
- param2);
- return;
+ self->OnMidiInDataOnMultimediaThread(midi_in_handle, param1, param2);
+ break;
case MIM_LONGDATA:
- self->OnLongMessageReceived(reinterpret_cast<MIDIHDR*>(param1),
- param2);
- return;
+ self->OnMidiInLongDataOnMultimediaThread(midi_in_handle, param1,
+ param2);
+ break;
case MIM_CLOSE:
- // TODO(yukawa): Implement crbug.com/279097.
- return;
+ self->OnMidiInCloseOnMultimediaThread(midi_in_handle);
+ break;
}
}
- void OnShortMessageReceived(uint8 status_byte,
- uint8 first_data_byte,
- uint8 second_data_byte,
- DWORD elapsed_ms) {
- if (device_to_be_closed())
+ void OnMidiInOpen(HMIDIIN midi_in_handle) {
+ UINT device_id = 0;
+ MMRESULT result = midiInGetID(midi_in_handle, &device_id);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "midiInGetID failed: " << GetInErrorMessage(result);
return;
- const size_t len = GetMidiMessageLength(status_byte);
- if (len == 0 || port_index() == kInvalidPortIndex)
+ }
+ MIDIINCAPS2W caps = {};
+ result = midiInGetDevCaps(device_id, reinterpret_cast<LPMIDIINCAPSW>(&caps),
+ sizeof(caps));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "midiInGetDevCaps failed: " << GetInErrorMessage(result);
+ return;
+ }
+ auto state =
+ make_scoped_refptr(new MidiInputDeviceState(MidiDeviceInfo(caps)));
+ state->midi_handle = midi_in_handle;
+ state->midi_header = CreateMIDIHDR(kBufferLength);
+ const auto& state_device_info = state->device_info;
+ bool add_new_port = false;
+ uint32 port_number = 0;
+ {
+ base::AutoLock auto_lock(input_ports_lock_);
+ const auto it = unused_input_ports_.find(state_device_info);
+ if (it == unused_input_ports_.end()) {
+ port_number = static_cast<uint32>(input_ports_.size());
+ add_new_port = true;
+ input_ports_.push_back(nullptr);
+ input_ports_ages_.push_back(0);
+ } else {
+ port_number = it->second.top();
+ it->second.pop();
+ if (it->second.empty()) {
+ unused_input_ports_.erase(it);
+ }
+ }
+ input_ports_[port_number] = state;
+
+ input_ports_ages_[port_number] += 1;
+ input_device_map_[input_ports_[port_number]->midi_handle] =
+ input_ports_[port_number];
+ input_ports_[port_number]->port_index = port_number;
+ input_ports_[port_number]->port_age = input_ports_ages_[port_number];
+ }
+ // Several initial startup tasks cannot be done in MIM_OPEN handler.
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::StartInputDeviceOnTaskThread,
+ base::Unretained(this), midi_in_handle));
+ if (add_new_port) {
+ const MidiPortInfo port_info(
+ // TODO(toyoshim): Use a hash ID insted crbug.com/467448
+ base::IntToString(static_cast<int>(port_number)),
+ GetManufacturerName(state_device_info),
+ base::WideToUTF8(state_device_info.product_name),
+ MmversionToString(state_device_info.driver_version),
+ MIDI_PORT_OPENED);
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::AddInputPortOnTaskThread,
+ base::Unretained(this), port_info));
+ } else {
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::SetInputPortStateOnTaskThread,
+ base::Unretained(this), port_number,
+ MidiPortState::MIDI_PORT_CONNECTED));
+ }
+ }
+
+ void OnMidiInDataOnMultimediaThread(HMIDIIN midi_in_handle,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ auto state = GetInputDeviceFromHandle(midi_in_handle);
+ if (!state)
return;
- const uint8 kData[] = { status_byte, first_data_byte, second_data_byte };
+ const uint8 status_byte = static_cast<uint8>(param1 & 0xff);
+ const uint8 first_data_byte = static_cast<uint8>((param1 >> 8) & 0xff);
+ const uint8 second_data_byte = static_cast<uint8>((param1 >> 16) & 0xff);
+ const DWORD elapsed_ms = param2;
+ const size_t len = GetMidiMessageLength(status_byte);
+ const uint8 kData[] = {status_byte, first_data_byte, second_data_byte};
+ std::vector<uint8> data;
+ data.assign(kData, kData + len);
DCHECK_LE(len, arraysize(kData));
- OnMessageReceived(kData, len, elapsed_ms);
+ // MIM_DATA/MIM_LONGDATA message treats the time when midiInStart() is
+ // called as the origin of |elapsed_ms|.
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757284.aspx
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757286.aspx
+ const base::TimeTicks event_time =
+ state->start_time + base::TimeDelta::FromMilliseconds(elapsed_ms);
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::ReceiveMidiDataOnTaskThread,
+ base::Unretained(this), state->port_index, data,
+ event_time));
}
- void OnLongMessageReceived(MIDIHDR* header, DWORD elapsed_ms) {
- if (header != midi_header_.get())
+ void OnMidiInLongDataOnMultimediaThread(HMIDIIN midi_in_handle,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ auto state = GetInputDeviceFromHandle(midi_in_handle);
+ if (!state)
return;
+ MIDIHDR* header = reinterpret_cast<MIDIHDR*>(param1);
+ const DWORD elapsed_ms = param2;
MMRESULT result = MMSYSERR_NOERROR;
- if (device_to_be_closed()) {
- if (midi_header_ &&
- (midi_header_->dwFlags & MHDR_PREPARED) == MHDR_PREPARED) {
- result = midiInUnprepareHeader(
- midi_handle_, midi_header_.get(), sizeof(*midi_header_));
+ if (destructor_started) {
+ if (state->midi_header &&
+ (state->midi_header->dwFlags & MHDR_PREPARED) == MHDR_PREPARED) {
+ result =
+ midiInUnprepareHeader(state->midi_handle, state->midi_header.get(),
+ sizeof(*state->midi_header));
DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
<< "Failed to uninitialize input buffer: "
<< GetInErrorMessage(result);
}
return;
}
- if (header->dwBytesRecorded > 0 && port_index() != kInvalidPortIndex) {
- OnMessageReceived(reinterpret_cast<const uint8*>(header->lpData),
- header->dwBytesRecorded,
- elapsed_ms);
+ if (header->dwBytesRecorded > 0) {
+ const uint8* src = reinterpret_cast<const uint8*>(header->lpData);
+ std::vector<uint8> data;
+ data.assign(src, src + header->dwBytesRecorded);
+ // MIM_DATA/MIM_LONGDATA message treats the time when midiInStart() is
+ // called as the origin of |elapsed_ms|.
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757284.aspx
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757286.aspx
+ const base::TimeTicks event_time =
+ state->start_time + base::TimeDelta::FromMilliseconds(elapsed_ms);
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::ReceiveMidiDataOnTaskThread,
+ base::Unretained(this), state->port_index, data,
+ event_time));
}
- result = midiInAddBuffer(midi_handle_, header, sizeof(*header));
+ result = midiInAddBuffer(state->midi_handle, header, sizeof(*header));
DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to attach input port: " << GetInErrorMessage(result);
+ << "Failed to attach input buffer: " << GetInErrorMessage(result)
+ << "port number:" << state->port_index;
}
- void OnMessageReceived(const uint8* data, size_t length, DWORD elapsed_ms) {
- // MIM_DATA/MIM_LONGDATA message treats the time when midiInStart() is
- // called as the origin of |elapsed_ms|.
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757284.aspx
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757286.aspx
- const base::TimeTicks event_time =
- start_time_ + base::TimeDelta::FromMilliseconds(elapsed_ms);
- manager_->ReceiveMidiData(port_index_, data, length, event_time);
- }
-
- MidiManagerWin* manager_;
- int port_index_;
- HMIDIIN midi_handle_;
- ScopedMIDIHDR midi_header_;
- base::TimeTicks start_time_;
- bool started_;
- bool device_to_be_closed_;
- DISALLOW_COPY_AND_ASSIGN(InDeviceInfo);
-};
-
-class MidiManagerWin::OutDeviceInfo {
- public:
- ~OutDeviceInfo() {
- Uninitialize();
+ void OnMidiInCloseOnMultimediaThread(HMIDIIN midi_in_handle) {
+ auto state = GetInputDeviceFromHandle(midi_in_handle);
+ if (!state)
+ return;
+ const uint32 port_number = state->port_index;
+ const auto device_info(state->device_info);
+ {
+ base::AutoLock auto_lock(input_ports_lock_);
+ input_device_map_.erase(state->midi_handle);
+ input_ports_[port_number] = nullptr;
+ input_ports_ages_[port_number] += 1;
+ unused_input_ports_[device_info].push(port_number);
+ }
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::SetInputPortStateOnTaskThread,
+ base::Unretained(this), port_number,
+ MIDI_PORT_DISCONNECTED));
}
- static scoped_ptr<OutDeviceInfo> Create(UINT device_id) {
- scoped_ptr<OutDeviceInfo> obj(new OutDeviceInfo);
- if (!obj->Initialize(device_id))
- obj.reset();
- return obj.Pass();
+ static void CALLBACK
+ OnMidiOutEventOnMainlyMultimediaThread(HMIDIOUT midi_out_handle,
+ UINT message,
+ DWORD_PTR instance,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ MidiServiceWinImpl* self = reinterpret_cast<MidiServiceWinImpl*>(instance);
+ if (!self)
+ return;
+ switch (message) {
+ case MOM_OPEN:
+ self->OnMidiOutOpen(midi_out_handle, param1, param2);
+ break;
+ case MOM_DONE:
+ self->OnMidiOutDoneOnMultimediaThread(midi_out_handle, param1);
+ break;
+ case MOM_CLOSE:
+ self->OnMidiOutCloseOnMultimediaThread(midi_out_handle);
+ break;
+ }
}
- HMIDIOUT midi_handle() const {
- return midi_handle_;
+ void OnMidiOutOpen(HMIDIOUT midi_out_handle,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ UINT device_id = 0;
+ MMRESULT result = midiOutGetID(midi_out_handle, &device_id);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "midiOutGetID failed: " << GetOutErrorMessage(result);
+ return;
+ }
+ MIDIOUTCAPS2W caps = {};
+ result = midiOutGetDevCaps(
+ device_id, reinterpret_cast<LPMIDIOUTCAPSW>(&caps), sizeof(caps));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "midiInGetDevCaps failed: " << GetOutErrorMessage(result);
+ return;
+ }
+ auto state =
+ make_scoped_refptr(new MidiOutputDeviceState(MidiDeviceInfo(caps)));
+ state->midi_handle = midi_out_handle;
+ const auto& state_device_info = state->device_info;
+ bool add_new_port = false;
+ uint32 port_number = 0;
+ {
+ base::AutoLock auto_lock(output_ports_lock_);
+ const auto it = unused_output_ports_.find(state_device_info);
+ if (it == unused_output_ports_.end()) {
+ port_number = static_cast<uint32>(output_ports_.size());
+ add_new_port = true;
+ output_ports_.push_back(nullptr);
+ output_ports_ages_.push_back(0);
+ } else {
+ port_number = it->second.top();
+ it->second.pop();
+ if (it->second.empty())
+ unused_output_ports_.erase(it);
+ }
+ output_ports_[port_number] = state;
+ output_ports_ages_[port_number] += 1;
+ output_device_map_[output_ports_[port_number]->midi_handle] =
+ output_ports_[port_number];
+ output_ports_[port_number]->port_index = port_number;
+ output_ports_[port_number]->port_age = output_ports_ages_[port_number];
+ }
+ if (add_new_port) {
+ const MidiPortInfo port_info(
+      // TODO(toyoshim): Use a hash ID instead. crbug.com/467448
+ base::IntToString(static_cast<int>(port_number)),
+ GetManufacturerName(state_device_info),
+ base::WideToUTF8(state_device_info.product_name),
+ MmversionToString(state_device_info.driver_version),
+ MIDI_PORT_OPENED);
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MidiServiceWinImpl::AddOutputPortOnTaskThread,
+ base::Unretained(this), port_info));
+ } else {
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::SetOutputPortStateOnTaskThread,
+ base::Unretained(this), port_number, MIDI_PORT_CONNECTED));
+ }
}
- void Quit() {
- quitting_ = true;
+ void OnMidiOutDoneOnMultimediaThread(HMIDIOUT midi_out_handle,
+ DWORD_PTR param1) {
+ auto state = GetOutputDeviceFromHandle(midi_out_handle);
+ if (!state)
+ return;
+ // Take ownership of the MIDIHDR object.
+ ScopedMIDIHDR header(reinterpret_cast<MIDIHDR*>(param1));
+ if (!header)
+ return;
+ MMRESULT result = midiOutUnprepareHeader(state->midi_handle, header.get(),
+ sizeof(*header));
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to uninitialize output buffer: "
+ << GetOutErrorMessage(result);
}
- void Send(const std::vector<uint8>& data) {
- // Check if the attached device is still available or not.
- if (!midi_handle_)
+ void OnMidiOutCloseOnMultimediaThread(HMIDIOUT midi_out_handle) {
+ auto state = GetOutputDeviceFromHandle(midi_out_handle);
+ if (!state)
return;
+ const uint32 port_number = state->port_index;
+ const auto device_info(state->device_info);
+ {
+ base::AutoLock auto_lock(output_ports_lock_);
+ output_device_map_.erase(state->midi_handle);
+ output_ports_[port_number] = nullptr;
+ output_ports_ages_[port_number] += 1;
+ unused_output_ports_[device_info].push(port_number);
+ state->closed = true;
+ }
+ task_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiServiceWinImpl::SetOutputPortStateOnTaskThread,
+ base::Unretained(this), port_number,
+ MIDI_PORT_DISCONNECTED));
+ }
+
+ /////////////////////////////////////////////////////////////////////////////
+ // Callbacks on the sender thread.
+ /////////////////////////////////////////////////////////////////////////////
+
+ void AssertOnSenderThread() {
+ DCHECK_EQ(sender_thread_.thread_id(), base::PlatformThread::CurrentId());
+ }
- // Give up sending MIDI messages here if the device is already closed.
- // Note that this check is optional. Regardless of that we check |closed_|
- // or not, nothing harmful happens as long as |midi_handle_| is still valid.
- if (closed_)
+ void SendOnSenderThread(uint32 port_number,
+ uint64 port_age,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) {
+ AssertOnSenderThread();
+ if (destructor_started) {
+ LOG(ERROR) << "ThreadSafeSendData failed because MidiServiceWinImpl is "
+ "being destructed. port: " << port_number;
+ }
+ auto state = GetOutputDeviceFromPort(port_number);
+ if (!state) {
+ LOG(ERROR) << "ThreadSafeSendData failed due to an invalid port number. "
+ << "port: " << port_number;
+ return;
+ }
+ if (state->closed) {
+ LOG(ERROR)
+ << "ThreadSafeSendData failed because target port is already closed."
+ << "port: " << port_number;
return;
+ }
+ if (state->port_age != port_age) {
+ LOG(ERROR)
+ << "ThreadSafeSendData failed because target port is being closed."
+ << "port: " << port_number << "expected port age: " << port_age
+ << "actual port age: " << state->port_age;
+ }
// MIDI Running status must be filtered out.
MidiMessageQueue message_queue(false);
message_queue.Add(data);
std::vector<uint8> message;
- while (!quitting_) {
+ while (true) {
+ if (destructor_started)
+ break;
+ if (state->closed)
+ break;
message_queue.Get(&message);
if (message.empty())
break;
// SendShortMidiMessageInternal can send a MIDI message up to 3 bytes.
if (message.size() <= 3)
- SendShortMidiMessageInternal(midi_handle_, message);
+ SendShortMidiMessageInternal(state->midi_handle, message);
else
- SendLongMidiMessageInternal(midi_handle_, message);
+ SendLongMidiMessageInternal(state->midi_handle, message);
}
}
- private:
- OutDeviceInfo()
- : midi_handle_(NULL),
- closed_(false),
- quitting_(false) {}
-
- bool Initialize(DWORD device_id) {
- Uninitialize();
- // Here we use |CALLBACK_FUNCTION| to subscribe MOM_DONE and MOM_CLOSE
- // events.
- // - MOM_DONE: SendLongMidiMessageInternal() relies on this event to clean
- // up the backing store where a long MIDI message is stored.
- // - MOM_CLOSE: This event is sent when 1) midiOutClose() is called, or 2)
- // the MIDI device becomes unavailable for some reasons, e.g., the cable
- // is disconnected. As for the former case, HMIDIOUT will be invalidated
- // soon after the callback is finished. As for the later case, however,
- // HMIDIOUT continues to be valid until midiOutClose() is called.
- MMRESULT result = midiOutOpen(&midi_handle_,
- device_id,
- reinterpret_cast<DWORD_PTR>(&HandleMessage),
- reinterpret_cast<DWORD_PTR>(this),
- CALLBACK_FUNCTION);
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to open output device. "
- << " id: " << device_id
- << " message: "<< GetOutErrorMessage(result);
- midi_handle_ = NULL;
- return false;
- }
- return true;
+ /////////////////////////////////////////////////////////////////////////////
+ // Callbacks on the task thread.
+ /////////////////////////////////////////////////////////////////////////////
+
+ void AssertOnTaskThread() {
+ DCHECK_EQ(task_thread_.thread_id(), base::PlatformThread::CurrentId());
}
- void Uninitialize() {
- if (!midi_handle_)
- return;
+ void UpdateDeviceListOnTaskThread() {
+ AssertOnTaskThread();
+ const UINT num_in_devices = midiInGetNumDevs();
+ for (UINT device_id = 0; device_id < num_in_devices; ++device_id) {
+ // Here we use |CALLBACK_FUNCTION| to subscribe MIM_DATA, MIM_LONGDATA,
+ // MIM_OPEN, and MIM_CLOSE events.
+ // - MIM_DATA: This is the only way to get a short MIDI message with
+ // timestamp information.
+ // - MIM_LONGDATA: This is the only way to get a long MIDI message with
+ // timestamp information.
+      // - MIM_OPEN: This event is sent when the input device is opened. Note
+ // this message is called on the caller thread.
+ // - MIM_CLOSE: This event is sent when 1) midiInClose() is called, or 2)
+ // the MIDI device becomes unavailable for some reasons, e.g., the
+ // cable is disconnected. As for the former case, HMIDIOUT will be
+ // invalidated soon after the callback is finished. As for the later
+ // case, however, HMIDIOUT continues to be valid until midiInClose()
+ // is called.
+ HMIDIIN midi_handle = kInvalidMidiInHandle;
+ const MMRESULT result = midiInOpen(
+ &midi_handle, device_id,
+ reinterpret_cast<DWORD_PTR>(&OnMidiInEventOnMainlyMultimediaThread),
+ reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR && result != MMSYSERR_ALLOCATED)
+ << "Failed to open output device. "
+ << " id: " << device_id << " message: " << GetInErrorMessage(result);
+ }
- MMRESULT result = midiOutReset(midi_handle_);
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to reset output port: " << GetOutErrorMessage(result);
- result = midiOutClose(midi_handle_);
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to close output port: " << GetOutErrorMessage(result);
- midi_handle_ = NULL;
- closed_ = true;
+ const UINT num_out_devices = midiOutGetNumDevs();
+ for (UINT device_id = 0; device_id < num_out_devices; ++device_id) {
+ // Here we use |CALLBACK_FUNCTION| to subscribe MOM_DONE, MOM_OPEN, and
+ // MOM_CLOSE events.
+ // - MOM_DONE: SendLongMidiMessageInternal() relies on this event to clean
+ // up the backing store where a long MIDI message is stored.
+      // - MOM_OPEN: This event is sent when the output device is opened. Note
+ // this message is called on the caller thread.
+ // - MOM_CLOSE: This event is sent when 1) midiOutClose() is called, or 2)
+ // the MIDI device becomes unavailable for some reasons, e.g., the
+ // cable is disconnected. As for the former case, HMIDIOUT will be
+ // invalidated soon after the callback is finished. As for the later
+ // case, however, HMIDIOUT continues to be valid until midiOutClose()
+ // is called.
+ HMIDIOUT midi_handle = kInvalidMidiOutHandle;
+ const MMRESULT result = midiOutOpen(
+ &midi_handle, device_id,
+ reinterpret_cast<DWORD_PTR>(&OnMidiOutEventOnMainlyMultimediaThread),
+ reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR && result != MMSYSERR_ALLOCATED)
+ << "Failed to open output device. "
+ << " id: " << device_id << " message: " << GetOutErrorMessage(result);
+ }
}
- static void CALLBACK HandleMessage(HMIDIOUT midi_out_handle,
- UINT message,
- DWORD_PTR instance,
- DWORD_PTR param1,
- DWORD_PTR param2) {
- // This method can be called back on any thread depending on Windows
- // multimedia subsystem and underlying MIDI drivers.
-
- OutDeviceInfo* self = reinterpret_cast<OutDeviceInfo*>(instance);
- if (!self)
+ void StartInputDeviceOnTaskThread(HMIDIIN midi_in_handle) {
+ AssertOnTaskThread();
+ auto state = GetInputDeviceFromHandle(midi_in_handle);
+ if (!state)
+ return;
+ MMRESULT result =
+ midiInPrepareHeader(state->midi_handle, state->midi_header.get(),
+ sizeof(*state->midi_header));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to initialize input buffer: "
+ << GetInErrorMessage(result);
return;
- if (self->midi_handle() != midi_out_handle)
+ }
+ result = midiInAddBuffer(state->midi_handle, state->midi_header.get(),
+ sizeof(*state->midi_header));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to attach input buffer: "
+ << GetInErrorMessage(result);
+ return;
+ }
+ result = midiInStart(state->midi_handle);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to start input port: "
+ << GetInErrorMessage(result);
return;
- switch (message) {
- case MOM_DONE: {
- // Take ownership of the MIDIHDR object.
- ScopedMIDIHDR header(reinterpret_cast<MIDIHDR*>(param1));
- if (!header)
- return;
- MMRESULT result = midiOutUnprepareHeader(
- self->midi_handle(), header.get(), sizeof(*header));
- DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
- << "Failed to uninitialize output buffer: "
- << GetOutErrorMessage(result);
- return;
- }
- case MOM_CLOSE:
- // No lock is required since this flag is just a hint to avoid
- // unnecessary API calls that will result in failure anyway.
- self->closed_ = true;
- // TODO(yukawa): Implement crbug.com/279097.
- return;
}
+ state->start_time = base::TimeTicks::Now();
+ state->start_time_initialized = true;
+ }
+
+ void CompleteInitializationOnTaskThread(MidiResult result) {
+ AssertOnTaskThread();
+ delegate_->OnCompleteInitialization(result);
+ }
+
+ void ReceiveMidiDataOnTaskThread(uint32 port_index,
+ std::vector<uint8> data,
+ base::TimeTicks time) {
+ AssertOnTaskThread();
+ delegate_->OnReceiveMidiData(port_index, data, time);
+ }
+
+ void AddInputPortOnTaskThread(MidiPortInfo info) {
+ AssertOnTaskThread();
+ delegate_->OnAddInputPort(info);
+ }
+
+ void AddOutputPortOnTaskThread(MidiPortInfo info) {
+ AssertOnTaskThread();
+ delegate_->OnAddOutputPort(info);
+ }
+
+ void SetInputPortStateOnTaskThread(uint32 port_index, MidiPortState state) {
+ AssertOnTaskThread();
+ delegate_->OnSetInputPortState(port_index, state);
+ }
+
+ void SetOutputPortStateOnTaskThread(uint32 port_index, MidiPortState state) {
+ AssertOnTaskThread();
+ delegate_->OnSetOutputPortState(port_index, state);
}
- HMIDIOUT midi_handle_;
+ /////////////////////////////////////////////////////////////////////////////
+ // Fields:
+ /////////////////////////////////////////////////////////////////////////////
+
+ // Does not take ownership.
+ MidiServiceWinDelegate* delegate_;
+
+ base::ThreadChecker thread_checker_;
+
+ base::Thread sender_thread_;
+ base::Thread task_thread_;
- // True if the device is already closed.
- volatile bool closed_;
+ base::Lock input_ports_lock_;
+ base::hash_map<HMIDIIN, scoped_refptr<MidiInputDeviceState>>
+ input_device_map_; // GUARDED_BY(input_ports_lock_)
+ PortNumberCache unused_input_ports_; // GUARDED_BY(input_ports_lock_)
+ std::vector<scoped_refptr<MidiInputDeviceState>>
+ input_ports_; // GUARDED_BY(input_ports_lock_)
+ std::vector<uint64> input_ports_ages_; // GUARDED_BY(input_ports_lock_)
- // True if the MidiManagerWin is trying to stop the sender thread.
- volatile bool quitting_;
+ base::Lock output_ports_lock_;
+ base::hash_map<HMIDIOUT, scoped_refptr<MidiOutputDeviceState>>
+ output_device_map_; // GUARDED_BY(output_ports_lock_)
+ PortNumberCache unused_output_ports_; // GUARDED_BY(output_ports_lock_)
+ std::vector<scoped_refptr<MidiOutputDeviceState>>
+ output_ports_; // GUARDED_BY(output_ports_lock_)
+ std::vector<uint64> output_ports_ages_; // GUARDED_BY(output_ports_lock_)
- DISALLOW_COPY_AND_ASSIGN(OutDeviceInfo);
+ // True if one thread reached MidiServiceWinImpl::~MidiServiceWinImpl(). Note
+ // that MidiServiceWinImpl::~MidiServiceWinImpl() is blocked until
+ // |sender_thread_|, and |task_thread_| are stopped.
+  // This flag can be used as the signal that background tasks must be
+  // interrupted.
+ // TODO(toyoshim): Use std::atomic<bool> when it is allowed.
+ volatile bool destructor_started;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiServiceWinImpl);
};
-MidiManagerWin::MidiManagerWin()
- : send_thread_("MidiSendThread") {
-}
+} // namespace
-void MidiManagerWin::StartInitialization() {
- const UINT num_in_devices = midiInGetNumDevs();
- in_devices_.reserve(num_in_devices);
- int inport_index = 0;
- for (UINT device_id = 0; device_id < num_in_devices; ++device_id) {
- MIDIINCAPS caps = {};
- MMRESULT result = midiInGetDevCaps(device_id, &caps, sizeof(caps));
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to obtain input device info: "
- << GetInErrorMessage(result);
- continue;
- }
- scoped_ptr<InDeviceInfo> in_device(InDeviceInfo::Create(this, device_id));
- if (!in_device)
- continue;
- MidiPortInfo info(
- base::IntToString(static_cast<int>(device_id)),
- "",
- base::WideToUTF8(caps.szPname),
- base::IntToString(static_cast<int>(caps.vDriverVersion)));
- AddInputPort(info);
- in_device->set_port_index(inport_index++);
- in_devices_.push_back(in_device.release());
- }
-
- const UINT num_out_devices = midiOutGetNumDevs();
- out_devices_.reserve(num_out_devices);
- for (UINT device_id = 0; device_id < num_out_devices; ++device_id) {
- MIDIOUTCAPS caps = {};
- MMRESULT result = midiOutGetDevCaps(device_id, &caps, sizeof(caps));
- if (result != MMSYSERR_NOERROR) {
- DLOG(ERROR) << "Failed to obtain output device info: "
- << GetOutErrorMessage(result);
- continue;
- }
- scoped_ptr<OutDeviceInfo> out_port(OutDeviceInfo::Create(device_id));
- if (!out_port)
- continue;
- MidiPortInfo info(
- base::IntToString(static_cast<int>(device_id)),
- "",
- base::WideToUTF8(caps.szPname),
- base::IntToString(static_cast<int>(caps.vDriverVersion)));
- AddOutputPort(info);
- out_devices_.push_back(out_port.release());
- }
-
- CompleteInitialization(MIDI_OK);
+MidiManagerWin::MidiManagerWin() {
}
MidiManagerWin::~MidiManagerWin() {
- // Cleanup order is important. |send_thread_| must be stopped before
- // |out_devices_| is cleared.
- for (auto& device : out_devices_)
- device->Quit();
- send_thread_.Stop();
-
- out_devices_.clear();
- in_devices_.clear();
+ midi_service_.reset();
+}
+
+void MidiManagerWin::StartInitialization() {
+ midi_service_.reset(new MidiServiceWinImpl);
+ // Note that |CompleteInitialization()| will be called from the callback.
+ midi_service_->InitializeAsync(this);
}
void MidiManagerWin::DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) {
- if (out_devices_.size() <= port_index)
+ if (!midi_service_)
return;
- base::TimeDelta delay;
+ base::TimeTicks time_to_send = base::TimeTicks::Now();
if (timestamp != 0.0) {
- base::TimeTicks time_to_send =
+ time_to_send =
base::TimeTicks() + base::TimeDelta::FromMicroseconds(
- timestamp * base::Time::kMicrosecondsPerSecond);
- delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
- }
-
- if (!send_thread_.IsRunning())
- send_thread_.Start();
-
- OutDeviceInfo* out_port = out_devices_[port_index];
- send_thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&OutDeviceInfo::Send, base::Unretained(out_port), data),
- delay);
-
- // Call back AccumulateMidiBytesSent() on |send_thread_| to emulate the
- // behavior of MidiManagerMac::SendMidiData.
- // TODO(yukawa): Do this task in a platform-independent way if possible.
- // See crbug.com/325810.
- send_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&MidiManagerClient::AccumulateMidiBytesSent,
- base::Unretained(client), data.size()));
+ timestamp * base::Time::kMicrosecondsPerSecond);
+ }
+ midi_service_->SendMidiDataAsync(port_index, data, time_to_send);
+
+  // TODO(toyoshim): This calculation should be done when the data is actually
+  // sent.
+ client->AccumulateMidiBytesSent(data.size());
+}
+
+void MidiManagerWin::OnCompleteInitialization(MidiResult result) {
+ CompleteInitialization(result);
+}
+
+void MidiManagerWin::OnAddInputPort(MidiPortInfo info) {
+ AddInputPort(info);
+}
+
+void MidiManagerWin::OnAddOutputPort(MidiPortInfo info) {
+ AddOutputPort(info);
+}
+
+void MidiManagerWin::OnSetInputPortState(uint32 port_index,
+ MidiPortState state) {
+ SetInputPortState(port_index, state);
+}
+
+void MidiManagerWin::OnSetOutputPortState(uint32 port_index,
+ MidiPortState state) {
+ SetOutputPortState(port_index, state);
+}
+
+void MidiManagerWin::OnReceiveMidiData(uint32 port_index,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) {
+ ReceiveMidiData(port_index, &data[0], data.size(), time);
}
MidiManager* MidiManager::Create() {
return new MidiManagerWin();
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_manager_win.h b/chromium/media/midi/midi_manager_win.h
index ff40e981020..f5598415993 100644
--- a/chromium/media/midi/midi_manager_win.h
+++ b/chromium/media/midi/midi_manager_win.h
@@ -8,33 +8,66 @@
#include <vector>
#include "base/basictypes.h"
-#include "base/memory/scoped_vector.h"
+#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
+#include "base/time/time.h"
#include "media/midi/midi_manager.h"
namespace media {
+namespace midi {
-class MidiManagerWin : public MidiManager {
+class MidiServiceWinDelegate {
+ public:
+ virtual ~MidiServiceWinDelegate() {}
+ virtual void OnCompleteInitialization(MidiResult result) = 0;
+ virtual void OnAddInputPort(MidiPortInfo info) = 0;
+ virtual void OnAddOutputPort(MidiPortInfo info) = 0;
+ virtual void OnSetInputPortState(uint32 port_index, MidiPortState state) = 0;
+ virtual void OnSetOutputPortState(uint32 port_index, MidiPortState state) = 0;
+ virtual void OnReceiveMidiData(uint32 port_index,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) = 0;
+};
+
+class MidiServiceWin {
+ public:
+ virtual ~MidiServiceWin() {}
+ // This method may return before the initialization is completed.
+ virtual void InitializeAsync(MidiServiceWinDelegate* delegate) = 0;
+ // This method may return before the specified data is actually sent.
+ virtual void SendMidiDataAsync(uint32 port_number,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) = 0;
+};
+
+class MidiManagerWin final : public MidiManager, public MidiServiceWinDelegate {
public:
MidiManagerWin();
- virtual ~MidiManagerWin();
+ ~MidiManagerWin() override;
+
+ // MidiManager overrides:
+ void StartInitialization() final;
+ void DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) final;
- // MidiManager implementation.
- virtual void StartInitialization() override;
- virtual void DispatchSendMidiData(MidiManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp) override;
+ // MidiServiceWinDelegate overrides:
+ void OnCompleteInitialization(MidiResult result) final;
+ void OnAddInputPort(MidiPortInfo info) final;
+ void OnAddOutputPort(MidiPortInfo info) final;
+ void OnSetInputPortState(uint32 port_index, MidiPortState state) final;
+ void OnSetOutputPortState(uint32 port_index, MidiPortState state) final;
+ void OnReceiveMidiData(uint32 port_index,
+ const std::vector<uint8>& data,
+ base::TimeTicks time) final;
private:
- class InDeviceInfo;
- class OutDeviceInfo;
- ScopedVector<InDeviceInfo> in_devices_;
- ScopedVector<OutDeviceInfo> out_devices_;
- base::Thread send_thread_;
+ scoped_ptr<MidiServiceWin> midi_service_;
DISALLOW_COPY_AND_ASSIGN(MidiManagerWin);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MANAGER_WIN_H_
diff --git a/chromium/media/midi/midi_message_queue.cc b/chromium/media/midi/midi_message_queue.cc
index 8011136c1bc..d35acc27bf9 100644
--- a/chromium/media/midi/midi_message_queue.cc
+++ b/chromium/media/midi/midi_message_queue.cc
@@ -10,6 +10,7 @@
#include "media/midi/midi_message_util.h"
namespace media {
+namespace midi {
namespace {
const uint8 kSysEx = 0xf0;
@@ -27,6 +28,10 @@ bool IsSystemRealTimeMessage(uint8 data) {
return 0xf8 <= data;
}
+bool IsSystemMessage(uint8 data) {
+ return 0xf0 <= data;
+}
+
} // namespace
MidiMessageQueue::MidiMessageQueue(bool allow_running_status)
@@ -104,9 +109,10 @@ void MidiMessageQueue::Get(std::vector<uint8>* message) {
if (next_message_.size() == target_len) {
std::swap(*message, next_message_);
next_message_.clear();
- if (allow_running_status_) {
+ if (allow_running_status_ && !IsSystemMessage(status_byte)) {
// Speculatively keep the status byte in case of running status. If this
// assumption is not true, |next_message_| will be cleared anyway.
+ // Note that system common messages should reset the running status.
next_message_.push_back(status_byte);
}
return;
@@ -116,4 +122,5 @@ void MidiMessageQueue::Get(std::vector<uint8>* message) {
}
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_message_queue.h b/chromium/media/midi/midi_message_queue.h
index f565f188293..e494c5a18ab 100644
--- a/chromium/media/midi/midi_message_queue.h
+++ b/chromium/media/midi/midi_message_queue.h
@@ -9,9 +9,10 @@
#include <vector>
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/midi_export.h"
namespace media {
+namespace midi {
// A simple message splitter for possibly unsafe/corrupted MIDI data stream.
// This class allows you to:
@@ -38,7 +39,7 @@ namespace media {
// dispatch(next_message);
// }
// }
-class MEDIA_EXPORT MidiMessageQueue {
+class MIDI_EXPORT MidiMessageQueue {
public:
// Initializes the queue. Set true to |allow_running_status| to enable
// "MIDI running status" reconstruction.
@@ -67,6 +68,7 @@ class MEDIA_EXPORT MidiMessageQueue {
DISALLOW_COPY_AND_ASSIGN(MidiMessageQueue);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MESSAGE_QUEUE_H_
diff --git a/chromium/media/midi/midi_message_queue_unittest.cc b/chromium/media/midi/midi_message_queue_unittest.cc
index 3c7122654e7..d25a692ad32 100644
--- a/chromium/media/midi/midi_message_queue_unittest.cc
+++ b/chromium/media/midi/midi_message_queue_unittest.cc
@@ -7,6 +7,7 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
const uint8 kGMOn[] = { 0xf0, 0x7e, 0x7f, 0x09, 0x01, 0xf7 };
@@ -22,6 +23,7 @@ const uint8 kChannelPressureWithRunningStatus[] = {
0xd0, 0x01, 0x01, 0x01,
};
const uint8 kTimingClock[] = { 0xf8 };
+const uint8 kMTCFrame[] = { 0xf1, 0x00 };
const uint8 kBrokenData1[] = { 0x90 };
const uint8 kBrokenData2[] = { 0xf7 };
const uint8 kBrokenData3[] = { 0xf2, 0x00 };
@@ -141,11 +143,11 @@ TEST(MidiMessageQueueTest, RunningStatusEnabled) {
TEST(MidiMessageQueueTest, RunningStatusEnabledWithRealTimeEvent) {
MidiMessageQueue queue(true);
- const uint8 kNoteOnWithRunningStatusWithkTimingClock[] = {
+ const uint8 kNoteOnWithRunningStatusWithTimingClock[] = {
0x90, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8,
0x7f,
};
- Add(&queue, kNoteOnWithRunningStatusWithkTimingClock);
+ Add(&queue, kNoteOnWithRunningStatusWithTimingClock);
std::vector<uint8> message;
queue.Get(&message);
EXPECT_MESSAGE(kTimingClock, message);
@@ -169,5 +171,25 @@ TEST(MidiMessageQueueTest, RunningStatusEnabledWithRealTimeEvent) {
EXPECT_TRUE(message.empty());
}
+TEST(MidiMessageQueueTest, RunningStatusEnabledWithSystemCommonMessage) {
+ MidiMessageQueue queue(true);
+ const uint8 kNoteOnWithRunningStatusWithSystemCommonMessage[] = {
+ 0x90, 0x3c, 0x7f, 0xf1, 0x00, 0x3c, 0x7f, 0xf8, 0x90, 0x3c, 0x7f,
+ };
+ Add(&queue, kNoteOnWithRunningStatusWithSystemCommonMessage);
+ std::vector<uint8> message;
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kMTCFrame, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message) << "Running status should be reset";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_TRUE(message.empty());
+}
+
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_message_util.cc b/chromium/media/midi/midi_message_util.cc
index 9e913d70c06..e8e953d1947 100644
--- a/chromium/media/midi/midi_message_util.cc
+++ b/chromium/media/midi/midi_message_util.cc
@@ -5,6 +5,7 @@
#include "media/midi/midi_message_util.h"
namespace media {
+namespace midi {
size_t GetMidiMessageLength(uint8 status_byte) {
if (status_byte < 0x80)
@@ -31,4 +32,5 @@ size_t GetMidiMessageLength(uint8 status_byte) {
return 1;
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_message_util.h b/chromium/media/midi/midi_message_util.h
index faaff178373..6533ab3f1a1 100644
--- a/chromium/media/midi/midi_message_util.h
+++ b/chromium/media/midi/midi_message_util.h
@@ -9,16 +9,17 @@
#include <vector>
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/midi_export.h"
namespace media {
+namespace midi {
// Returns the length of a MIDI message in bytes. Never returns 4 or greater.
// Returns 0 if |status_byte| is:
// - not a valid status byte, namely data byte.
// - the MIDI System Exclusive message.
// - the End of System Exclusive message.
-MEDIA_EXPORT size_t GetMidiMessageLength(uint8 status_byte);
+MIDI_EXPORT size_t GetMidiMessageLength(uint8 status_byte);
const uint8 kSysExByte = 0xf0;
const uint8 kEndOfSysExByte = 0xf7;
@@ -28,6 +29,7 @@ const uint8 kSysMessageBitPattern = 0xf0;
const uint8 kSysRTMessageBitMask = 0xf8;
const uint8 kSysRTMessageBitPattern = 0xf8;
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_MESSAGE_UTIL_H_
diff --git a/chromium/media/midi/midi_message_util_unittest.cc b/chromium/media/midi/midi_message_util_unittest.cc
index 529efbf9586..25dbf5e6b9c 100644
--- a/chromium/media/midi/midi_message_util_unittest.cc
+++ b/chromium/media/midi/midi_message_util_unittest.cc
@@ -7,6 +7,7 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
const uint8 kGMOn[] = { 0xf0, 0x7e, 0x7f, 0x09, 0x01, 0xf7 };
@@ -31,4 +32,5 @@ TEST(GetMidiMessageLengthTest, BasicTest) {
}
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_port_info.cc b/chromium/media/midi/midi_port_info.cc
index 02b4aa9ba7d..112829ac8e6 100644
--- a/chromium/media/midi/midi_port_info.cc
+++ b/chromium/media/midi/midi_port_info.cc
@@ -5,17 +5,20 @@
#include "media/midi/midi_port_info.h"
namespace media {
+namespace midi {
MidiPortInfo::MidiPortInfo() {}
MidiPortInfo::MidiPortInfo(const std::string& in_id,
const std::string& in_manufacturer,
const std::string& in_name,
- const std::string& in_version)
+ const std::string& in_version,
+ MidiPortState in_state)
: id(in_id),
manufacturer(in_manufacturer),
name(in_name),
- version(in_version) {}
+ version(in_version),
+ state(in_state) {}
MidiPortInfo::~MidiPortInfo() {}
@@ -23,6 +26,8 @@ MidiPortInfo::MidiPortInfo(const MidiPortInfo& info)
: id(info.id),
manufacturer(info.manufacturer),
name(info.name),
- version(info.version) {}
+ version(info.version),
+ state(info.state) {}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/midi_port_info.h b/chromium/media/midi/midi_port_info.h
index 1fe3bcaf0fc..c53c9060fc2 100644
--- a/chromium/media/midi/midi_port_info.h
+++ b/chromium/media/midi/midi_port_info.h
@@ -9,16 +9,25 @@
#include <vector>
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/midi_export.h"
namespace media {
+namespace midi {
-struct MEDIA_EXPORT MidiPortInfo {
+enum MidiPortState {
+ MIDI_PORT_DISCONNECTED,
+ MIDI_PORT_CONNECTED,
+ MIDI_PORT_OPENED,
+ MIDI_PORT_STATE_LAST = MIDI_PORT_OPENED,
+};
+
+struct MIDI_EXPORT MidiPortInfo final {
MidiPortInfo();
MidiPortInfo(const std::string& in_id,
const std::string& in_manufacturer,
const std::string& in_name,
- const std::string& in_version);
+ const std::string& in_version,
+ MidiPortState in_state);
MidiPortInfo(const MidiPortInfo& info);
~MidiPortInfo();
@@ -27,10 +36,12 @@ struct MEDIA_EXPORT MidiPortInfo {
std::string manufacturer;
std::string name;
std::string version;
+ MidiPortState state;
};
-typedef std::vector<MidiPortInfo> MidiPortInfoList;
+using MidiPortInfoList = std::vector<MidiPortInfo>;
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_PORT_INFO_H_
diff --git a/chromium/media/midi/midi_result.h b/chromium/media/midi/midi_result.h
index 2fb58a41bdd..5432dca7a1c 100644
--- a/chromium/media/midi/midi_result.h
+++ b/chromium/media/midi/midi_result.h
@@ -6,6 +6,7 @@
#define MEDIA_MIDI_MIDI_RESULT_H_
namespace media {
+namespace midi {
// Result codes for MIDI.
enum MidiResult {
@@ -20,6 +21,7 @@ enum MidiResult {
MIDI_RESULT_LAST = MIDI_INITIALIZATION_ERROR,
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_MIDI_RESULT_H_
diff --git a/chromium/media/midi/midi_scheduler.cc b/chromium/media/midi/midi_scheduler.cc
new file mode 100644
index 00000000000..31886747259
--- /dev/null
+++ b/chromium/media/midi/midi_scheduler.cc
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_scheduler.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/midi/midi_manager.h"
+
+namespace media {
+namespace midi {
+
+MidiScheduler::MidiScheduler(MidiManager* manager)
+ : manager_(manager),
+ weak_factory_(this) {
+}
+
+MidiScheduler::~MidiScheduler() {
+}
+
+// TODO(crbug.com/467442): Use CancelableTaskTracker once it supports
+// DelayedTask.
+void MidiScheduler::PostSendDataTask(MidiManagerClient* client,
+ size_t length,
+ double timestamp,
+ const base::Closure& closure) {
+ DCHECK(client);
+
+ const base::Closure& weak_closure = base::Bind(
+ &MidiScheduler::InvokeClosure,
+ weak_factory_.GetWeakPtr(),
+ client,
+ length,
+ closure);
+
+ base::TimeDelta delay;
+ if (timestamp != 0.0) {
+ base::TimeTicks time_to_send =
+ base::TimeTicks() + base::TimeDelta::FromMicroseconds(
+ timestamp * base::Time::kMicrosecondsPerSecond);
+ delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
+ }
+ base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, weak_closure, delay);
+}
+
+void MidiScheduler::InvokeClosure(MidiManagerClient* client,
+ size_t length,
+ const base::Closure& closure) {
+ closure.Run();
+ manager_->AccumulateMidiBytesSent(client, length);
+}
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_scheduler.h b/chromium/media/midi/midi_scheduler.h
new file mode 100644
index 00000000000..58de5d23286
--- /dev/null
+++ b/chromium/media/midi/midi_scheduler.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_SCHEDULER_H_
+#define MEDIA_MIDI_MIDI_SCHEDULER_H_
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "media/midi/midi_export.h"
+
+namespace media {
+namespace midi {
+
+class MidiManager;
+class MidiManagerClient;
+
+// TODO(crbug.com/467442): Make tasks cancelable per client.
+class MIDI_EXPORT MidiScheduler final {
+ public:
+ explicit MidiScheduler(MidiManager* manager);
+ ~MidiScheduler();
+
+ // Post |closure| to the current message loop safely. The |closure| will not
+ // be invoked after MidiScheduler is deleted. AccumulateMidiBytesSent() of
+ // |client| is called internally.
+ void PostSendDataTask(MidiManagerClient* client,
+ size_t length,
+ double timestamp,
+ const base::Closure& closure);
+
+ private:
+ void InvokeClosure(MidiManagerClient* client,
+ size_t length,
+ const base::Closure& closure);
+
+ // MidiManager should own the MidiScheduler and be alive longer.
+ MidiManager* manager_;
+ base::WeakPtrFactory<MidiScheduler> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiScheduler);
+};
+
+} // namespace midi
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_SCHEDULER_H_
diff --git a/chromium/media/midi/midi_unittests.isolate b/chromium/media/midi/midi_unittests.isolate
new file mode 100644
index 00000000000..dca0d3596d1
--- /dev/null
+++ b/chromium/media/midi/midi_unittests.isolate
@@ -0,0 +1,63 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ '../../base/base.isolate',
+ ],
+ 'conditions': [
+ ['use_x11==0', {
+ 'variables': {
+ 'command': [
+ '../../testing/test_env.py',
+ '<(PRODUCT_DIR)/midi_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ }],
+ ['use_x11==1', {
+ 'variables': {
+ 'command': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)',
+ '<(PRODUCT_DIR)/midi_unittests',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ 'files': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)/xdisplaycheck',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../../testing/test_env.py',
+ '<(PRODUCT_DIR)/midi_unittests<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/midi_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/midi_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+}
diff --git a/chromium/media/midi/usb_midi_descriptor_parser.cc b/chromium/media/midi/usb_midi_descriptor_parser.cc
index d454ff94691..9d0af75be9c 100644
--- a/chromium/media/midi/usb_midi_descriptor_parser.cc
+++ b/chromium/media/midi/usb_midi_descriptor_parser.cc
@@ -7,8 +7,10 @@
#include <algorithm>
#include "base/logging.h"
+#include "base/strings/stringprintf.h"
namespace media {
+namespace midi {
namespace {
@@ -58,8 +60,20 @@ class JackMatcher {
uint8 id_;
};
+int DecodeBcd(uint8 byte) {
+ DCHECK_LT((byte & 0xf0) >> 4, 0xa);
+ DCHECK_LT(byte & 0x0f, 0xa);
+ return ((byte & 0xf0) >> 4) * 10 + (byte & 0x0f);
+}
+
} // namespace
+std::string UsbMidiDescriptorParser::DeviceInfo::BcdVersionToString(
+ uint16 version) {
+ return base::StringPrintf("%d.%02d", DecodeBcd(version >> 8),
+ DecodeBcd(version & 0xff));
+}
+
UsbMidiDescriptorParser::UsbMidiDescriptorParser()
: is_parsing_usb_midi_interface_(false),
current_endpoint_address_(0),
@@ -79,6 +93,31 @@ bool UsbMidiDescriptorParser::Parse(UsbMidiDevice* device,
return result;
}
+bool UsbMidiDescriptorParser::ParseDeviceInfo(
+ const uint8* data, size_t size, DeviceInfo* info) {
+ *info = DeviceInfo();
+ for (const uint8* current = data;
+ current < data + size;
+ current += current[0]) {
+ uint8 length = current[0];
+ if (length < 2) {
+ DVLOG(1) << "Descriptor Type is not accessible.";
+ return false;
+ }
+ if (current + length > data + size) {
+ DVLOG(1) << "The header size is incorrect.";
+ return false;
+ }
+ DescriptorType descriptor_type = static_cast<DescriptorType>(current[1]);
+ if (descriptor_type != TYPE_DEVICE)
+ continue;
+ // We assume that ParseDevice doesn't modify |*info| if it returns false.
+ return ParseDevice(current, length, info);
+ }
+ // No DEVICE descriptor is found.
+ return false;
+}
+
bool UsbMidiDescriptorParser::ParseInternal(UsbMidiDevice* device,
const uint8* data,
size_t size,
@@ -129,6 +168,21 @@ bool UsbMidiDescriptorParser::ParseInternal(UsbMidiDevice* device,
return true;
}
+bool UsbMidiDescriptorParser::ParseDevice(
+ const uint8* data, size_t size, DeviceInfo* info) {
+ if (size < 0x12) {
+ DVLOG(1) << "DEVICE header size is incorrect.";
+ return false;
+ }
+
+ info->vendor_id = data[8] | (data[9] << 8);
+ info->product_id = data[0xa] | (data[0xb] << 8);
+ info->bcd_device_version = data[0xc] | (data[0xd] << 8);
+ info->manufacturer_index = data[0xe];
+ info->product_index = data[0xf];
+ return true;
+}
+
bool UsbMidiDescriptorParser::ParseInterface(const uint8* data, size_t size) {
if (size != 9) {
DVLOG(1) << "INTERFACE header size is incorrect.";
@@ -232,4 +286,5 @@ void UsbMidiDescriptorParser::Clear() {
incomplete_jacks_.clear();
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_descriptor_parser.h b/chromium/media/midi/usb_midi_descriptor_parser.h
index 826a449ef6e..8f741ca4cf9 100644
--- a/chromium/media/midi/usb_midi_descriptor_parser.h
+++ b/chromium/media/midi/usb_midi_descriptor_parser.h
@@ -5,13 +5,15 @@
#ifndef MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
#define MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
+#include <string>
#include <vector>
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/usb_midi_export.h"
#include "media/midi/usb_midi_jack.h"
namespace media {
+namespace midi {
class UsbMidiDevice;
@@ -19,8 +21,26 @@ class UsbMidiDevice;
// generates input / output lists of MIDIPortInfo.
// This is not a generic USB descriptor parser: this parser is designed
// for collecting USB-MIDI jacks information from the descriptor.
-class MEDIA_EXPORT UsbMidiDescriptorParser {
+class USB_MIDI_EXPORT UsbMidiDescriptorParser {
public:
+ struct DeviceInfo {
+ DeviceInfo()
+ : vendor_id(0),
+ product_id(0),
+ bcd_device_version(0),
+ manufacturer_index(0),
+ product_index(0) {}
+ uint16 vendor_id;
+ uint16 product_id;
+ // The higher one byte represents the "major" number and the lower one byte
+ // represents the "minor" number.
+ uint16 bcd_device_version;
+ uint8 manufacturer_index;
+ uint8 product_index;
+
+ static std::string BcdVersionToString(uint16);
+ };
+
UsbMidiDescriptorParser();
~UsbMidiDescriptorParser();
@@ -32,11 +52,14 @@ class MEDIA_EXPORT UsbMidiDescriptorParser {
size_t size,
std::vector<UsbMidiJack>* jacks);
+ bool ParseDeviceInfo(const uint8* data, size_t size, DeviceInfo* info);
+
private:
bool ParseInternal(UsbMidiDevice* device,
const uint8* data,
size_t size,
std::vector<UsbMidiJack>* jacks);
+ bool ParseDevice(const uint8* data, size_t size, DeviceInfo* info);
bool ParseInterface(const uint8* data, size_t size);
bool ParseCSInterface(UsbMidiDevice* device, const uint8* data, size_t size);
bool ParseEndpoint(const uint8* data, size_t size);
@@ -54,7 +77,7 @@ class MEDIA_EXPORT UsbMidiDescriptorParser {
DISALLOW_COPY_AND_ASSIGN(UsbMidiDescriptorParser);
};
-
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
diff --git a/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
index 4e243d15b72..c0e569dd72b 100644
--- a/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
+++ b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
@@ -7,13 +7,14 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
TEST(UsbMidiDescriptorParserTest, ParseEmpty) {
UsbMidiDescriptorParser parser;
std::vector<UsbMidiJack> jacks;
- EXPECT_TRUE(parser.Parse(NULL, NULL, 0, &jacks));
+ EXPECT_TRUE(parser.Parse(nullptr, nullptr, 0, &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -21,7 +22,7 @@ TEST(UsbMidiDescriptorParserTest, InvalidSize) {
UsbMidiDescriptorParser parser;
std::vector<UsbMidiJack> jacks;
uint8 data[] = {0x04};
- EXPECT_FALSE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_FALSE(parser.Parse(nullptr, data, arraysize(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -35,7 +36,7 @@ TEST(UsbMidiDescriptorParserTest, NonExistingJackIsAssociated) {
0x24, 0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01,
0x01,
};
- EXPECT_FALSE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_FALSE(parser.Parse(nullptr, data, arraysize(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -50,7 +51,7 @@ TEST(UsbMidiDescriptorParserTest,
0x24, 0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01,
0x01,
};
- EXPECT_TRUE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_TRUE(parser.Parse(nullptr, data, arraysize(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -74,28 +75,51 @@ TEST(UsbMidiDescriptorParserTest, Parse) {
0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
0x05, 0x25, 0x01, 0x01, 0x07,
};
- EXPECT_TRUE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_TRUE(parser.Parse(nullptr, data, arraysize(data), &jacks));
ASSERT_EQ(3u, jacks.size());
EXPECT_EQ(2u, jacks[0].jack_id);
EXPECT_EQ(0u, jacks[0].cable_number);
EXPECT_EQ(2u, jacks[0].endpoint_number());
EXPECT_EQ(UsbMidiJack::DIRECTION_OUT, jacks[0].direction());
- EXPECT_EQ(NULL, jacks[0].device);
+ EXPECT_EQ(nullptr, jacks[0].device);
EXPECT_EQ(3u, jacks[1].jack_id);
EXPECT_EQ(1u, jacks[1].cable_number);
EXPECT_EQ(2u, jacks[1].endpoint_number());
EXPECT_EQ(UsbMidiJack::DIRECTION_OUT, jacks[1].direction());
- EXPECT_EQ(NULL, jacks[1].device);
+ EXPECT_EQ(nullptr, jacks[1].device);
EXPECT_EQ(7u, jacks[2].jack_id);
EXPECT_EQ(0u, jacks[2].cable_number);
EXPECT_EQ(2u, jacks[2].endpoint_number());
EXPECT_EQ(UsbMidiJack::DIRECTION_IN, jacks[2].direction());
- EXPECT_EQ(NULL, jacks[2].device);
+ EXPECT_EQ(nullptr, jacks[2].device);
+}
+
+TEST(UsbMidiDescriptorParserTest, ParseDeviceInfoEmpty) {
+ UsbMidiDescriptorParser parser;
+ UsbMidiDescriptorParser::DeviceInfo info;
+ EXPECT_FALSE(parser.ParseDeviceInfo(nullptr, 0, &info));
+}
+
+TEST(UsbMidiDescriptorParserTest, ParseDeviceInfo) {
+ UsbMidiDescriptorParser parser;
+ UsbMidiDescriptorParser::DeviceInfo info;
+ uint8 data[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x01, 0x23,
+ 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x00, 0x0a,
+ };
+ EXPECT_TRUE(parser.ParseDeviceInfo(data, arraysize(data), &info));
+
+ EXPECT_EQ(0x2301, info.vendor_id);
+ EXPECT_EQ(0x6745, info.product_id);
+ EXPECT_EQ(0xab89, info.bcd_device_version);
+ EXPECT_EQ(0xcd, info.manufacturer_index);
+ EXPECT_EQ(0xef, info.product_index);
}
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_device.h b/chromium/media/midi/usb_midi_device.h
index 6af92b6ef6b..c1dc8cd71dc 100644
--- a/chromium/media/midi/usb_midi_device.h
+++ b/chromium/media/midi/usb_midi_device.h
@@ -11,15 +11,17 @@
#include "base/callback.h"
#include "base/memory/scoped_vector.h"
#include "base/time/time.h"
-#include "media/base/media_export.h"
+#include "media/midi/usb_midi_export.h"
namespace media {
+namespace midi {
+class MidiManagerUsb;
class UsbMidiDevice;
// Delegate class for UsbMidiDevice.
// Each method is called when a corresponding event arrives at the device.
-class MEDIA_EXPORT UsbMidiDeviceDelegate {
+class USB_MIDI_EXPORT UsbMidiDeviceDelegate {
public:
virtual ~UsbMidiDeviceDelegate() {}
@@ -29,12 +31,17 @@ class MEDIA_EXPORT UsbMidiDeviceDelegate {
const uint8* data,
size_t size,
base::TimeTicks time) = 0;
+
+ // Called when a USB-MIDI device is attached.
+ virtual void OnDeviceAttached(scoped_ptr<UsbMidiDevice> device) = 0;
+ // Called when a USB-MIDI device is detached.
+ virtual void OnDeviceDetached(size_t index) = 0;
};
// UsbMidiDevice represents a USB-MIDI device.
// This is an interface class and each platform-dependent implementation class
// will be a derived class.
-class MEDIA_EXPORT UsbMidiDevice {
+class USB_MIDI_EXPORT UsbMidiDevice {
public:
typedef ScopedVector<UsbMidiDevice> Devices;
@@ -45,6 +52,7 @@ class MEDIA_EXPORT UsbMidiDevice {
public:
typedef base::Callback<void(bool result, Devices* devices)> Callback;
virtual ~Factory() {}
+
// Enumerates devices.
// Devices that have no USB-MIDI interfaces can be omitted.
// When the operation succeeds, |callback| will be called with |true| and
@@ -59,13 +67,23 @@ class MEDIA_EXPORT UsbMidiDevice {
virtual ~UsbMidiDevice() {}
- // Returns the descriptor of this device.
- virtual std::vector<uint8> GetDescriptor() = 0;
+ // Returns the descriptors of this device.
+ virtual std::vector<uint8> GetDescriptors() = 0;
+
+ // Return the name of the manufacturer.
+ virtual std::string GetManufacturer() = 0;
+
+ // Return the name of the device.
+ virtual std::string GetProductName() = 0;
+
+ // Return the device version.
+ virtual std::string GetDeviceVersion() = 0;
// Sends |data| to the given USB endpoint of this device.
virtual void Send(int endpoint_number, const std::vector<uint8>& data) = 0;
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_DEVICE_H_
diff --git a/chromium/media/midi/usb_midi_device_android.cc b/chromium/media/midi/usb_midi_device_android.cc
index 8f93c3d47a0..7778519f430 100644
--- a/chromium/media/midi/usb_midi_device_android.cc
+++ b/chromium/media/midi/usb_midi_device_android.cc
@@ -4,21 +4,26 @@
#include "media/midi/usb_midi_device_android.h"
-#include <jni.h>
-#include <vector>
#include "base/android/jni_array.h"
+#include "base/i18n/icu_string_conversions.h"
+#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "jni/UsbMidiDeviceAndroid_jni.h"
+#include "media/midi/usb_midi_descriptor_parser.h"
namespace media {
+namespace midi {
UsbMidiDeviceAndroid::UsbMidiDeviceAndroid(ObjectRef raw_device,
UsbMidiDeviceDelegate* delegate)
: raw_device_(raw_device), delegate_(delegate) {
JNIEnv* env = base::android::AttachCurrentThread();
- Java_UsbMidiDeviceAndroid_registerSelf(
- env, raw_device_.obj(), reinterpret_cast<jlong>(this));
+ Java_UsbMidiDeviceAndroid_registerSelf(env, raw_device_.obj(),
+ reinterpret_cast<jlong>(this));
+
+ GetDescriptorsInternal();
+ InitDeviceInfo();
}
UsbMidiDeviceAndroid::~UsbMidiDeviceAndroid() {
@@ -26,14 +31,20 @@ UsbMidiDeviceAndroid::~UsbMidiDeviceAndroid() {
Java_UsbMidiDeviceAndroid_close(env, raw_device_.obj());
}
-std::vector<uint8> UsbMidiDeviceAndroid::GetDescriptor() {
- JNIEnv* env = base::android::AttachCurrentThread();
- base::android::ScopedJavaLocalRef<jbyteArray> descriptors =
- Java_UsbMidiDeviceAndroid_getDescriptors(env, raw_device_.obj());
+std::vector<uint8> UsbMidiDeviceAndroid::GetDescriptors() {
+ return descriptors_;
+}
- std::vector<uint8> ret;
- base::android::JavaByteArrayToByteVector(env, descriptors.obj(), &ret);
- return ret;
+std::string UsbMidiDeviceAndroid::GetManufacturer() {
+ return manufacturer_;
+}
+
+std::string UsbMidiDeviceAndroid::GetProductName() {
+ return product_;
+}
+
+std::string UsbMidiDeviceAndroid::GetDeviceVersion() {
+ return device_version_;
}
void UsbMidiDeviceAndroid::Send(int endpoint_number,
@@ -43,8 +54,8 @@ void UsbMidiDeviceAndroid::Send(int endpoint_number,
ScopedJavaLocalRef<jbyteArray> data_to_pass =
base::android::ToJavaByteArray(env, head, data.size());
- Java_UsbMidiDeviceAndroid_send(
- env, raw_device_.obj(), endpoint_number, data_to_pass.obj());
+ Java_UsbMidiDeviceAndroid_send(env, raw_device_.obj(), endpoint_number,
+ data_to_pass.obj());
}
void UsbMidiDeviceAndroid::OnData(JNIEnv* env,
@@ -56,11 +67,79 @@ void UsbMidiDeviceAndroid::OnData(JNIEnv* env,
const uint8* head = bytes.size() ? &bytes[0] : NULL;
delegate_->ReceiveUsbMidiData(this, endpoint_number, head, bytes.size(),
- base::TimeTicks::HighResNow());
+ base::TimeTicks::Now());
}
bool UsbMidiDeviceAndroid::RegisterUsbMidiDevice(JNIEnv* env) {
return RegisterNativesImpl(env);
}
+void UsbMidiDeviceAndroid::GetDescriptorsInternal() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ base::android::ScopedJavaLocalRef<jbyteArray> descriptors =
+ Java_UsbMidiDeviceAndroid_getDescriptors(env, raw_device_.obj());
+
+ base::android::JavaByteArrayToByteVector(env, descriptors.obj(),
+ &descriptors_);
+}
+
+void UsbMidiDeviceAndroid::InitDeviceInfo() {
+ UsbMidiDescriptorParser parser;
+ UsbMidiDescriptorParser::DeviceInfo info;
+
+ const uint8* data = descriptors_.size() > 0 ? &descriptors_[0] : nullptr;
+
+ if (!parser.ParseDeviceInfo(data, descriptors_.size(), &info)) {
+ // We don't report the error here. If it is critical, we will realize it
+ // when we parse the descriptors again for ports.
+ manufacturer_ = "invalid descriptor";
+ product_ = "invalid descriptor";
+ device_version_ = "invalid descriptor";
+ return;
+ }
+
+ manufacturer_ =
+ GetString(info.manufacturer_index,
+ base::StringPrintf("(vendor id = 0x%04x)", info.vendor_id));
+ product_ =
+ GetString(info.product_index,
+ base::StringPrintf("(product id = 0x%04x)", info.product_id));
+ device_version_ = info.BcdVersionToString(info.bcd_device_version);
+}
+
+std::vector<uint8> UsbMidiDeviceAndroid::GetStringDescriptor(int index) {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ base::android::ScopedJavaLocalRef<jbyteArray> descriptors =
+ Java_UsbMidiDeviceAndroid_getStringDescriptor(env, raw_device_.obj(),
+ index);
+
+ std::vector<uint8> ret;
+ base::android::JavaByteArrayToByteVector(env, descriptors.obj(), &ret);
+ return ret;
+}
+
+std::string UsbMidiDeviceAndroid::GetString(int index,
+ const std::string& backup) {
+ const uint8 DESCRIPTOR_TYPE_STRING = 3;
+
+ if (!index) {
+ // index 0 means there is no such descriptor.
+ return backup;
+ }
+ std::vector<uint8> descriptor = GetStringDescriptor(index);
+ if (descriptor.size() < 2 || descriptor.size() < descriptor[0] ||
+ descriptor[1] != DESCRIPTOR_TYPE_STRING) {
+ // |descriptor| is not a valid string descriptor.
+ return backup;
+ }
+ size_t size = descriptor[0];
+ std::string encoded(reinterpret_cast<char*>(&descriptor[0]) + 2, size - 2);
+ std::string result;
+ // Unicode ECN specifies that the string is encoded in UTF-16LE.
+ if (!base::ConvertToUtf8AndNormalize(encoded, "utf-16le", &result))
+ return backup;
+ return result;
+}
+
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_device_android.h b/chromium/media/midi/usb_midi_device_android.h
index 053e50e0ddf..c4fd1ad29db 100644
--- a/chromium/media/midi/usb_midi_device_android.h
+++ b/chromium/media/midi/usb_midi_device_android.h
@@ -6,29 +6,33 @@
#define MEDIA_MIDI_USB_MIDI_DEVICE_ANDROID_H_
#include <jni.h>
+#include <string>
#include <vector>
#include "base/android/scoped_java_ref.h"
#include "base/basictypes.h"
#include "base/callback.h"
-#include "media/base/media_export.h"
#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_export.h"
namespace media {
+namespace midi {
-class MEDIA_EXPORT UsbMidiDeviceAndroid : public UsbMidiDevice {
+class USB_MIDI_EXPORT UsbMidiDeviceAndroid : public UsbMidiDevice {
public:
typedef base::android::ScopedJavaLocalRef<jobject> ObjectRef;
static scoped_ptr<Factory> CreateFactory();
UsbMidiDeviceAndroid(ObjectRef raw_device, UsbMidiDeviceDelegate* delegate);
- virtual ~UsbMidiDeviceAndroid();
+ ~UsbMidiDeviceAndroid() override;
// UsbMidiDevice implementation.
- virtual std::vector<uint8> GetDescriptor() override;
- virtual void Send(int endpoint_number,
- const std::vector<uint8>& data) override;
+ std::vector<uint8> GetDescriptors() override;
+ std::string GetManufacturer() override;
+ std::string GetProductName() override;
+ std::string GetDeviceVersion() override;
+ void Send(int endpoint_number, const std::vector<uint8>& data) override;
// Called by the Java world.
void OnData(JNIEnv* env,
@@ -39,13 +43,24 @@ class MEDIA_EXPORT UsbMidiDeviceAndroid : public UsbMidiDevice {
static bool RegisterUsbMidiDevice(JNIEnv* env);
private:
+ void GetDescriptorsInternal();
+ void InitDeviceInfo();
+ std::vector<uint8> GetStringDescriptor(int index);
+ std::string GetString(int index, const std::string& backup);
+
// The actual device object.
base::android::ScopedJavaGlobalRef<jobject> raw_device_;
UsbMidiDeviceDelegate* delegate_;
+ std::vector<uint8> descriptors_;
+ std::string manufacturer_;
+ std::string product_;
+ std::string device_version_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(UsbMidiDeviceAndroid);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_DEVICE_ANDROID_H_
diff --git a/chromium/media/midi/usb_midi_device_factory_android.cc b/chromium/media/midi/usb_midi_device_factory_android.cc
index 081e6b2c2d2..636316c3183 100644
--- a/chromium/media/midi/usb_midi_device_factory_android.cc
+++ b/chromium/media/midi/usb_midi_device_factory_android.cc
@@ -4,20 +4,17 @@
#include "media/midi/usb_midi_device_factory_android.h"
-#include <jni.h>
-#include <vector>
-#include "base/android/scoped_java_ref.h"
#include "base/bind.h"
#include "base/containers/hash_tables.h"
#include "base/lazy_instance.h"
-#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/lock.h"
#include "jni/UsbMidiDeviceFactoryAndroid_jni.h"
#include "media/midi/usb_midi_device_android.h"
namespace media {
+namespace midi {
namespace {
@@ -30,7 +27,8 @@ UsbMidiDeviceFactoryAndroid::UsbMidiDeviceFactoryAndroid() : delegate_(NULL) {}
UsbMidiDeviceFactoryAndroid::~UsbMidiDeviceFactoryAndroid() {
JNIEnv* env = base::android::AttachCurrentThread();
if (!raw_factory_.is_null())
- Java_UsbMidiDeviceFactoryAndroid_close(env, raw_factory_.obj());
+ Java_UsbMidiDeviceFactoryAndroid_close(
+ env, raw_factory_.obj(), base::android::GetApplicationContext());
}
void UsbMidiDeviceFactoryAndroid::EnumerateDevices(
@@ -39,7 +37,8 @@ void UsbMidiDeviceFactoryAndroid::EnumerateDevices(
DCHECK(!delegate_);
JNIEnv* env = base::android::AttachCurrentThread();
uintptr_t pointer = reinterpret_cast<uintptr_t>(this);
- raw_factory_.Reset(Java_UsbMidiDeviceFactoryAndroid_create(env, pointer));
+ raw_factory_.Reset(Java_UsbMidiDeviceFactoryAndroid_create(
+ env, base::android::GetApplicationContext(), pointer));
delegate_ = delegate;
callback_ = callback;
@@ -70,8 +69,28 @@ void UsbMidiDeviceFactoryAndroid::OnUsbMidiDeviceRequestDone(
callback_.Run(true, &devices_to_pass);
}
+// Called from the Java world.
+void UsbMidiDeviceFactoryAndroid::OnUsbMidiDeviceAttached(
+ JNIEnv* env,
+ jobject caller,
+ jobject device) {
+ UsbMidiDeviceAndroid::ObjectRef raw_device(env, device);
+ delegate_->OnDeviceAttached(
+ scoped_ptr<UsbMidiDevice>(
+ new UsbMidiDeviceAndroid(raw_device, delegate_)));
+}
+
+// Called from the Java world.
+void UsbMidiDeviceFactoryAndroid::OnUsbMidiDeviceDetached(
+ JNIEnv* env,
+ jobject caller,
+ jint index) {
+ delegate_->OnDeviceDetached(index);
+}
+
bool UsbMidiDeviceFactoryAndroid::RegisterUsbMidiDeviceFactory(JNIEnv* env) {
return RegisterNativesImpl(env);
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_device_factory_android.h b/chromium/media/midi/usb_midi_device_factory_android.h
index fb1792e38e6..d81586a37b3 100644
--- a/chromium/media/midi/usb_midi_device_factory_android.h
+++ b/chromium/media/midi/usb_midi_device_factory_android.h
@@ -13,24 +13,28 @@
#include "base/callback.h"
#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
-#include "media/base/media_export.h"
#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_export.h"
namespace media {
+namespace midi {
// This class enumerates UsbMidiDevices.
-class MEDIA_EXPORT UsbMidiDeviceFactoryAndroid : public UsbMidiDevice::Factory {
+class USB_MIDI_EXPORT UsbMidiDeviceFactoryAndroid
+ : public UsbMidiDevice::Factory {
public:
UsbMidiDeviceFactoryAndroid();
- virtual ~UsbMidiDeviceFactoryAndroid();
+ ~UsbMidiDeviceFactoryAndroid() override;
// UsbMidiDevice::Factory implementation.
- virtual void EnumerateDevices(UsbMidiDeviceDelegate* delegate,
- Callback callback) override;
+ void EnumerateDevices(UsbMidiDeviceDelegate* delegate,
+ Callback callback) override;
void OnUsbMidiDeviceRequestDone(JNIEnv* env,
jobject caller,
jobjectArray devices);
+ void OnUsbMidiDeviceAttached(JNIEnv* env, jobject caller, jobject device);
+ void OnUsbMidiDeviceDetached(JNIEnv* env, jobject caller, jint index);
static bool RegisterUsbMidiDeviceFactory(JNIEnv* env);
@@ -43,6 +47,7 @@ class MEDIA_EXPORT UsbMidiDeviceFactoryAndroid : public UsbMidiDevice::Factory {
DISALLOW_COPY_AND_ASSIGN(UsbMidiDeviceFactoryAndroid);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_DEVICE_FACTORY_ANDROID_H_
diff --git a/chromium/media/midi/usb_midi_export.h b/chromium/media/midi/usb_midi_export.h
new file mode 100644
index 00000000000..0335d63cddd
--- /dev/null
+++ b/chromium/media/midi/usb_midi_export.h
@@ -0,0 +1,33 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_EXPORT_H_
+#define MEDIA_MIDI_USB_MIDI_EXPORT_H_
+
+// These files can be included as part of the midi component using the
+// MIDI_IMPLEMENTATION define (where we want to export in the component build),
+// or in the unit tests (where we never want to export/import, even in
+// component mode). The EXPORT_USB_MIDI define controls this.
+#if defined(COMPONENT_BUILD) && defined(EXPORT_USB_MIDI)
+#if defined(WIN32)
+
+#if defined(MIDI_IMPLEMENTATION)
+#define USB_MIDI_EXPORT __declspec(dllexport)
+#else
+#define USB_MIDI_EXPORT __declspec(dllimport)
+#endif // defined(MIDI_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(MIDI_IMPLEMENTATION)
+#define USB_MIDI_EXPORT __attribute__((visibility("default")))
+#else
+#define USB_MIDI_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define USB_MIDI_EXPORT
+#endif
+
+#endif // MEDIA_MIDI_USB_MIDI_EXPORT_H_
diff --git a/chromium/media/midi/usb_midi_input_stream.cc b/chromium/media/midi/usb_midi_input_stream.cc
index a46596d0a86..10f71467302 100644
--- a/chromium/media/midi/usb_midi_input_stream.cc
+++ b/chromium/media/midi/usb_midi_input_stream.cc
@@ -5,14 +5,12 @@
#include "media/midi/usb_midi_input_stream.h"
#include <string.h>
-#include <map>
-#include <vector>
#include "base/logging.h"
#include "media/midi/usb_midi_device.h"
-#include "media/midi/usb_midi_jack.h"
namespace media {
+namespace midi {
UsbMidiInputStream::JackUniqueKey::JackUniqueKey(UsbMidiDevice* device,
int endpoint_number,
@@ -37,20 +35,21 @@ bool UsbMidiInputStream::JackUniqueKey::operator<(
return cable_number < that.cable_number;
}
-UsbMidiInputStream::UsbMidiInputStream(const std::vector<UsbMidiJack>& jacks,
- Delegate* delegate)
- : delegate_(delegate) {
- for (size_t i = 0; i < jacks.size(); ++i) {
- jack_dictionary_.insert(
- std::make_pair(JackUniqueKey(jacks[i].device,
- jacks[i].endpoint_number(),
- jacks[i].cable_number),
- i));
- }
-}
+UsbMidiInputStream::UsbMidiInputStream(Delegate* delegate)
+ : delegate_(delegate) {}
UsbMidiInputStream::~UsbMidiInputStream() {}
+void UsbMidiInputStream::Add(const UsbMidiJack& jack) {
+ JackUniqueKey key(jack.device,
+ jack.endpoint_number(),
+ jack.cable_number);
+
+ jacks_.push_back(jack);
+ DCHECK(jack_dictionary_.end() == jack_dictionary_.find(key));
+ jack_dictionary_.insert(std::make_pair(key, jack_dictionary_.size()));
+}
+
void UsbMidiInputStream::OnReceivedData(UsbMidiDevice* device,
int endpoint_number,
const uint8* data,
@@ -89,17 +88,5 @@ void UsbMidiInputStream::ProcessOnePacket(UsbMidiDevice* device,
delegate_->OnReceivedData(it->second, &packet[1], packet_size, time);
}
-std::vector<UsbMidiInputStream::JackUniqueKey>
-UsbMidiInputStream::RegisteredJackKeysForTesting() const {
- std::vector<JackUniqueKey> result(jack_dictionary_.size(),
- JackUniqueKey(0, 0, 0));
- for (std::map<JackUniqueKey, size_t>::const_iterator it =
- jack_dictionary_.begin();
- it != jack_dictionary_.end(); ++it) {
- DCHECK_LT(it->second, result.size());
- result[it->second] = it->first;
- }
- return result;
-}
-
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_input_stream.h b/chromium/media/midi/usb_midi_input_stream.h
index 0841751f39c..8fb732e8017 100644
--- a/chromium/media/midi/usb_midi_input_stream.h
+++ b/chromium/media/midi/usb_midi_input_stream.h
@@ -11,19 +11,20 @@
#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/time/time.h"
-#include "media/base/media_export.h"
+#include "media/midi/usb_midi_export.h"
#include "media/midi/usb_midi_jack.h"
namespace media {
+namespace midi {
class UsbMidiDevice;
// UsbMidiInputStream converts USB-MIDI data to MIDI data.
// See "USB Device Class Definition for MIDI Devices" Release 1.0,
// Section 4 "USB-MIDI Event Packets" for details.
-class MEDIA_EXPORT UsbMidiInputStream {
+class USB_MIDI_EXPORT UsbMidiInputStream {
public:
- class MEDIA_EXPORT Delegate {
+ class USB_MIDI_EXPORT Delegate {
public:
virtual ~Delegate() {}
// This function is called when some data arrives to a USB-MIDI jack.
@@ -45,10 +46,11 @@ class MEDIA_EXPORT UsbMidiInputStream {
int cable_number;
};
- UsbMidiInputStream(const std::vector<UsbMidiJack>& jacks,
- Delegate* delegate);
+ explicit UsbMidiInputStream(Delegate* delegate);
~UsbMidiInputStream();
+ void Add(const UsbMidiJack& jack);
+
// This function should be called when some data arrives to a USB-MIDI
// endpoint. This function converts the data to MIDI data and call
// |delegate->OnReceivedData| with it.
@@ -59,7 +61,7 @@ class MEDIA_EXPORT UsbMidiInputStream {
size_t size,
base::TimeTicks time);
- std::vector<JackUniqueKey> RegisteredJackKeysForTesting() const;
+ const std::vector<UsbMidiJack>& jacks() const { return jacks_; }
private:
static const size_t kPacketSize = 4;
@@ -70,6 +72,7 @@ class MEDIA_EXPORT UsbMidiInputStream {
const uint8* packet,
base::TimeTicks time);
+ std::vector<UsbMidiJack> jacks_;
// A map from UsbMidiJack to its index in |jacks_|.
std::map<JackUniqueKey, size_t> jack_dictionary_;
@@ -79,6 +82,7 @@ class MEDIA_EXPORT UsbMidiInputStream {
DISALLOW_COPY_AND_ASSIGN(UsbMidiInputStream);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_INPUT_STREAM_H_
diff --git a/chromium/media/midi/usb_midi_input_stream_unittest.cc b/chromium/media/midi/usb_midi_input_stream_unittest.cc
index 835d67273f7..99a969052e1 100644
--- a/chromium/media/midi/usb_midi_input_stream_unittest.cc
+++ b/chromium/media/midi/usb_midi_input_stream_unittest.cc
@@ -16,6 +16,7 @@
using base::TimeTicks;
namespace media {
+namespace midi {
namespace {
@@ -23,7 +24,10 @@ class TestUsbMidiDevice : public UsbMidiDevice {
public:
TestUsbMidiDevice() {}
~TestUsbMidiDevice() override {}
- std::vector<uint8> GetDescriptor() override { return std::vector<uint8>(); }
+ std::vector<uint8> GetDescriptors() override { return std::vector<uint8>(); }
+ std::string GetManufacturer() override { return std::string(); }
+ std::string GetProductName() override { return std::string(); }
+ std::string GetDeviceVersion() override { return std::string(); }
void Send(int endpoint_number, const std::vector<uint8>& data) override {}
private:
@@ -53,26 +57,24 @@ class MockDelegate : public UsbMidiInputStream::Delegate {
class UsbMidiInputStreamTest : public ::testing::Test {
protected:
UsbMidiInputStreamTest() {
- std::vector<UsbMidiJack> jacks;
-
- jacks.push_back(UsbMidiJack(&device1_,
- 84, // jack_id
- 4, // cable_number
- 135)); // endpoint_address
- jacks.push_back(UsbMidiJack(&device2_,
- 85,
- 5,
- 137));
- jacks.push_back(UsbMidiJack(&device2_,
- 84,
- 4,
- 135));
- jacks.push_back(UsbMidiJack(&device1_,
- 85,
- 5,
- 135));
-
- stream_.reset(new UsbMidiInputStream(jacks, &delegate_));
+ stream_.reset(new UsbMidiInputStream(&delegate_));
+
+ stream_->Add(UsbMidiJack(&device1_,
+ 84, // jack_id
+ 4, // cable_number
+ 135)); // endpoint_address
+ stream_->Add(UsbMidiJack(&device2_,
+ 85,
+ 5,
+ 137));
+ stream_->Add(UsbMidiJack(&device2_,
+ 84,
+ 4,
+ 135));
+ stream_->Add(UsbMidiJack(&device1_,
+ 85,
+ 5,
+ 135));
}
TestUsbMidiDevice device1_;
@@ -172,4 +174,5 @@ TEST_F(UsbMidiInputStreamTest, DispatchForDevice2) {
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_jack.h b/chromium/media/midi/usb_midi_jack.h
index 271cad88e72..8315564e2c5 100644
--- a/chromium/media/midi/usb_midi_jack.h
+++ b/chromium/media/midi/usb_midi_jack.h
@@ -6,14 +6,15 @@
#define MEDIA_MIDI_USB_MIDI_JACK_H_
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/usb_midi_export.h"
namespace media {
+namespace midi {
class UsbMidiDevice;
// UsbMidiJack represents an EMBEDDED MIDI jack.
-struct MEDIA_EXPORT UsbMidiJack {
+struct USB_MIDI_EXPORT UsbMidiJack {
// The direction of the endpoint associated with an EMBEDDED MIDI jack.
// Note that an IN MIDI jack associated with an OUT endpoint has
// ***DIRECTION_OUT*** direction.
@@ -46,6 +47,7 @@ struct MEDIA_EXPORT UsbMidiJack {
}
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_JACK_H_
diff --git a/chromium/media/midi/usb_midi_output_stream.cc b/chromium/media/midi/usb_midi_output_stream.cc
index 2f3289754f0..ded37e401ef 100644
--- a/chromium/media/midi/usb_midi_output_stream.cc
+++ b/chromium/media/midi/usb_midi_output_stream.cc
@@ -9,6 +9,7 @@
#include "media/midi/usb_midi_device.h"
namespace media {
+namespace midi {
UsbMidiOutputStream::UsbMidiOutputStream(const UsbMidiJack& jack)
: jack_(jack), pending_size_(0), is_sending_sysex_(false) {}
@@ -98,7 +99,7 @@ bool UsbMidiOutputStream::PushSysExMessage(const std::vector<uint8>& data,
message[message_size] = byte;
++message_size;
if (byte == kEndOfSysExByte) {
- uint8 code_index = message_size + 0x4;
+ uint8 code_index = static_cast<uint8>(message_size) + 0x4;
DCHECK(code_index == 0x5 || code_index == 0x6 || code_index == 0x7);
data_to_send->push_back((jack_.cable_number << 4) | code_index);
data_to_send->insert(data_to_send->end(),
@@ -190,4 +191,5 @@ bool UsbMidiOutputStream::PushChannelMessage(const std::vector<uint8>& data,
return true;
}
+} // namespace midi
} // namespace media
diff --git a/chromium/media/midi/usb_midi_output_stream.h b/chromium/media/midi/usb_midi_output_stream.h
index 1d98d584fd0..6fc71716d66 100644
--- a/chromium/media/midi/usb_midi_output_stream.h
+++ b/chromium/media/midi/usb_midi_output_stream.h
@@ -8,15 +8,16 @@
#include <vector>
#include "base/basictypes.h"
-#include "media/base/media_export.h"
+#include "media/midi/usb_midi_export.h"
#include "media/midi/usb_midi_jack.h"
namespace media {
+namespace midi {
// UsbMidiOutputStream converts MIDI data to USB-MIDI data.
// See "USB Device Class Definition for MIDI Devices" Release 1.0,
// Section 4 "USB-MIDI Event Packets" for details.
-class MEDIA_EXPORT UsbMidiOutputStream {
+class USB_MIDI_EXPORT UsbMidiOutputStream {
public:
explicit UsbMidiOutputStream(const UsbMidiJack& jack);
@@ -52,6 +53,7 @@ class MEDIA_EXPORT UsbMidiOutputStream {
DISALLOW_COPY_AND_ASSIGN(UsbMidiOutputStream);
};
+} // namespace midi
} // namespace media
#endif // MEDIA_MIDI_USB_MIDI_OUTPUT_STREAM_H_
diff --git a/chromium/media/midi/usb_midi_output_stream_unittest.cc b/chromium/media/midi/usb_midi_output_stream_unittest.cc
index f682f42ac93..b0e2d5be55f 100644
--- a/chromium/media/midi/usb_midi_output_stream_unittest.cc
+++ b/chromium/media/midi/usb_midi_output_stream_unittest.cc
@@ -13,6 +13,7 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+namespace midi {
namespace {
@@ -26,7 +27,10 @@ class MockUsbMidiDevice : public UsbMidiDevice {
MockUsbMidiDevice() {}
~MockUsbMidiDevice() override {}
- std::vector<uint8> GetDescriptor() override { return std::vector<uint8>(); }
+ std::vector<uint8> GetDescriptors() override { return std::vector<uint8>(); }
+ std::string GetManufacturer() override { return std::string(); }
+ std::string GetProductName() override { return std::string(); }
+ std::string GetDeviceVersion() override { return std::string(); }
void Send(int endpoint_number, const std::vector<uint8>& data) override {
for (size_t i = 0; i < data.size(); ++i) {
@@ -270,4 +274,5 @@ TEST_F(UsbMidiOutputStreamTest, SendRealTimeInSysExMessage) {
} // namespace
+} // namespace midi
} // namespace media
diff --git a/chromium/media/mojo/BUILD.gn b/chromium/media/mojo/BUILD.gn
index d30848b1921..fb983772c76 100644
--- a/chromium/media/mojo/BUILD.gn
+++ b/chromium/media/mojo/BUILD.gn
@@ -12,6 +12,6 @@ group("mojo") {
group("tests") {
testonly = true
deps = [
- "//media/mojo/services:tests"
- ]
+ "//media/mojo/services:tests",
+ ]
}
diff --git a/chromium/media/mojo/DEPS b/chromium/media/mojo/DEPS
index 1d3279ec857..fe3c60e0141 100644
--- a/chromium/media/mojo/DEPS
+++ b/chromium/media/mojo/DEPS
@@ -1,5 +1,6 @@
include_rules = [
"+mojo/application",
+ "+mojo/common",
"+mojo/converters",
- "+mojo/public",
+ "+third_party/mojo/src/mojo/public",
]
diff --git a/chromium/media/mojo/interfaces/BUILD.gn b/chromium/media/mojo/interfaces/BUILD.gn
index a4469fd34e4..1f2ae38da4e 100644
--- a/chromium/media/mojo/interfaces/BUILD.gn
+++ b/chromium/media/mojo/interfaces/BUILD.gn
@@ -2,17 +2,22 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//mojo/public/tools/bindings/mojom.gni")
+import("//third_party/mojo/src/mojo/public/tools/bindings/mojom.gni")
-# GYP version: media/media.gyp:mojo_media_bindings
mojom("interfaces") {
sources = [
- "media_types.mojom",
- "media_renderer.mojom",
+ "content_decryption_module.mojom",
+ "decryptor.mojom",
"demuxer_stream.mojom",
+ "media_renderer.mojom",
+ "media_types.mojom",
]
+ if (is_chromeos) {
+ sources += [ "platform_verification.mojom" ]
+ }
+
deps = [
- "//mojo/services/public/interfaces/geometry",
+ "//ui/mojo/geometry:interfaces",
]
}
diff --git a/chromium/media/mojo/interfaces/content_decryption_module.mojom b/chromium/media/mojo/interfaces/content_decryption_module.mojom
new file mode 100644
index 00000000000..24d407236a5
--- /dev/null
+++ b/chromium/media/mojo/interfaces/content_decryption_module.mojom
@@ -0,0 +1,144 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module mojo;
+
+import "media/mojo/interfaces/decryptor.mojom";
+
+// Transport layer of media::MediaKeys::Exception (see media/base/media_keys.h).
+// This is used for ContentDecryptionModule (CDM) promise rejections.
+// Note: This can also be used for session errors in prefixed API.
+enum CdmException {
+ NOT_SUPPORTED_ERROR,
+ INVALID_STATE_ERROR,
+ INVALID_ACCESS_ERROR,
+ QUOTA_EXCEEDED_ERROR,
+ UNKNOWN_ERROR,
+ CLIENT_ERROR,
+ OUTPUT_ERROR
+};
+
+// Transport layer of media::CdmKeyInformation::KeyStatus (see
+// media/base/cdm_key_information.h). This is used for indicating the status
+// of a specific key ID.
+enum CdmKeyStatus {
+ USABLE,
+ INTERNAL_ERROR,
+ EXPIRED,
+ OUTPUT_NOT_ALLOWED
+};
+
+// Transport layer of media::CdmPromise (see media/base/cdm_promise.h).
+// - When |success| is true, the promise is resolved and all other fields should
+// be ignored.
+// - When |success| is false, the promise is rejected with |exception|,
+// |system_code| and |error_message|.
+struct CdmPromiseResult {
+ bool success;
+ CdmException exception;
+ uint32 system_code;
+ string error_message;
+};
+
+// Transport layer of media::CdmKeyInformation (see
+// media/base/cdm_key_information.h). It is used to specify a key_id and it's
+// associated status.
+struct CdmKeyInformation {
+ array<uint8> key_id;
+ CdmKeyStatus status;
+ uint32 system_code;
+};
+
+// See media::MediaKeys::MessageType
+enum CdmMessageType {
+ LICENSE_REQUEST,
+ LICENSE_RENEWAL,
+ LICENSE_RELEASE
+};
+
+// An interface that represents a CDM in the Encrypted Media Extensions (EME)
+// spec (https://w3c.github.io/encrypted-media/). See media/base/media_keys.h.
+interface ContentDecryptionModule {
+ // See media::MediaKeys::SessionType.
+ enum SessionType {
+ TEMPORARY_SESSION,
+ PERSISTENT_LICENSE_SESSION,
+ PERSISTENT_RELEASE_MESSAGE_SESSION
+ };
+
+ // See media::EmeInitDataType.
+ enum InitDataType {
+ UNKNOWN,
+ WEBM,
+ CENC,
+ KEYIDS
+ };
+
+ // Sets ContentDecryptionModuleClient. Must be called before any other calls.
+ SetClient(ContentDecryptionModuleClient client);
+
+ // Provides a server certificate to be used to encrypt messages to the
+ // license server.
+ SetServerCertificate(array<uint8> certificate_data)
+ => (CdmPromiseResult result);
+
+ // Creates a session with the |init_data_type|, |init_data| and |session_type|
+ // provided. If |result.success| is false, the output |session_id| will be
+ // null.
+ CreateSessionAndGenerateRequest(SessionType session_type,
+ InitDataType init_data_type,
+ array<uint8> init_data)
+ => (CdmPromiseResult result, string? session_id);
+
+ // Loads the session associated with |session_id| and |session_type|.
+ // Combinations of |result.success| and |session_id| means:
+ // (true, non-null) : Session successfully loaded.
+ // (true, null) : Session not found.
+ // (false, non-null): N/A; this combination is not allowed.
+ // (false, null) : Unexpected error. See other fields in |result|.
+ LoadSession(SessionType session_type, string session_id)
+ => (CdmPromiseResult result, string? session_id);
+
+ // Updates a session specified by |session_id| with |response|.
+ UpdateSession(string session_id, array<uint8> response)
+ => (CdmPromiseResult result);
+
+ // Closes the session specified by |session_id|.
+ CloseSession(string session_id) => (CdmPromiseResult result);
+
+ // Removes stored session data associated with the active session specified by
+ // |session_id|.
+ RemoveSession(string session_id) => (CdmPromiseResult result);
+
+ // Assigns the |cdm_id| to the CDM, and retrieves the |decryptor| associated
+ // with this CDM instance.
+ // A CDM implementation must choose to support either an explicit or implicit
+ // decryptor:
+ // - Explicit (non-null) decryptor: The client (e.g. media pipeline) will use
+ // the |decryptor| directly to decrypt (and decode) encrypted buffers.
+ // - Implicit (null) decryptor: The client (e.g. media pipeline) will use the
+ // |cdm_id| to locate a decryptor and associate it with the client.
+ // Note: In Chromium GetCdmContext() is a sync call. But we don't have an easy
+ // way to support sync calls on a mojo interface. Instead the client should
+ // generate a client side ID and pass it to the service.
+ GetCdmContext(int32 cdm_id, Decryptor&? decryptor);
+};
+
+// Session callbacks. See media/base/media_keys.h for details.
+interface ContentDecryptionModuleClient {
+ OnSessionMessage(string session_id, CdmMessageType message_type,
+ array<uint8> message, string legacy_destination_url);
+
+ OnSessionClosed(string session_id);
+
+ OnLegacySessionError(string session_id, CdmException exception,
+ uint32 system_code, string error_message);
+
+ OnSessionKeysChange(string session_id, bool has_additional_usable_key,
+ array<CdmKeyInformation> key_information);
+
+ // Provide session expiration update for |session_id|.
+ // |new_expiry_time_sec| is the number of seconds since epoch (Jan 1, 1970).
+ OnSessionExpirationUpdate(string session_id, double new_expiry_time_sec);
+};
diff --git a/chromium/media/mojo/interfaces/decryptor.mojom b/chromium/media/mojo/interfaces/decryptor.mojom
new file mode 100644
index 00000000000..55091e4671c
--- /dev/null
+++ b/chromium/media/mojo/interfaces/decryptor.mojom
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module mojo;
+
+import "media/mojo/interfaces/demuxer_stream.mojom";
+import "media/mojo/interfaces/media_types.mojom";
+
+// TODO(xhwang): Add mojo types for AudioBuffer and VideoFrame.
+struct AudioBuffer {};
+struct VideoFrame {};
+
+// Interface for decrypting (and decoding) encrypted streams.
+// See media/base/decryptor.h for details.
+interface Decryptor {
+ // Status of a decrypt or decrypt-and-decode operation. The returned
+ // buffer/frame of such an operation is NOT null iff the status is SUCCESS.
+ enum Status {
+ SUCCESS, // Successfully completed. Decrypted buffer ready.
+ NO_KEY, // No key is available to decrypt.
+ NEED_MORE_DATA, // Decoder needs more data to produce an output.
+ ERROR // Key is available but an error occurred during decryption.
+ };
+
+ // Decrypts the |encrypted| buffer and returns the decrypt |status| and
+ // decrypted |buffer|.
+ // At most one decrypt call is allowed at any time for a |stream_type|.
+ Decrypt(DemuxerStream.Type stream_type, MediaDecoderBuffer encrypted)
+ => (Status status, MediaDecoderBuffer? buffer);
+
+ // Cancels any pending decrypt for |stream_type| with SUCCESS.
+ CancelDecrypt(DemuxerStream.Type stream_type);
+
+ // Initializes a decoder with the given |config|. Returns whether the
+ // initialization succeeded.
+ InitializeAudioDecoder(AudioDecoderConfig config) => (bool success);
+ InitializeVideoDecoder(VideoDecoderConfig config) => (bool success);
+
+ // Decrypts and decodes the |encrypted| buffer and returns the |status| and
+ // the decrypted |audio_buffers| or |video_frame|.
+ // At end-of-stream, this method should be called repeatedly with
+ // end-of-stream |encrypted| until no buffer/frame can be produced.
+ // These methods can only be called after the corresponding decoder has
+ // been successfully initialized.
+ // At most one decrypt-and-decode call is allowed at any time for a
+ // |stream_type|.
+ DecryptAndDecodeAudio(MediaDecoderBuffer encrypted)
+ => (Status status, array<AudioBuffer>? audio_buffers);
+ DecryptAndDecodeVideo(
+ MediaDecoderBuffer encrypted) => (Status status, VideoFrame? video_frame);
+
+ // Resets the decoder for |stream_type| to a clean initialized state and
+ // cancels any pending decrypt-and-decode operations immediately with ERROR.
+ // This method can only be called after the corresponding decoder has been
+ // successfully initialized.
+ ResetDecoder(DemuxerStream.Type stream_type);
+
+ // Releases decoder resources, deinitializes the decoder, aborts any pending
+ // initialization (with false) or decrypt-and-decode (with ERROR) for
+ // |stream_type| immediately.
+ // This method can be called any time after Initialize{Audio|Video}Decoder()
+ // has been called (with the correct stream type).
+ // After this operation, the decoder is set to an uninitialized state.
+ // The decoder can be reinitialized after it is deinitialized.
+ DeinitializeDecoder(DemuxerStream.Type stream_type);
+};
+
+interface DecryptorClient {
+ // Indicates that a new usable key is available in the CDM associated with the
+ // Decryptor.
+ OnNewUsableKey();
+};
diff --git a/chromium/media/mojo/interfaces/demuxer_stream.mojom b/chromium/media/mojo/interfaces/demuxer_stream.mojom
index 3024942ff29..24386964d3e 100644
--- a/chromium/media/mojo/interfaces/demuxer_stream.mojom
+++ b/chromium/media/mojo/interfaces/demuxer_stream.mojom
@@ -9,7 +9,6 @@ import "media/mojo/interfaces/media_types.mojom";
// DemuxerStream is modeled after media::DemuxerStream using mojo in order to
// enable proxying between a media::Pipeline and media::Renderer living in two
// different applications.
-[Client=DemuxerStreamClient]
interface DemuxerStream {
// See media::DemuxerStream for descriptions.
enum Type {
@@ -26,33 +25,32 @@ interface DemuxerStream {
CONFIG_CHANGED,
};
- // Request a MediaDecoderBuffer from this stream for decoding and rendering.
- // When available, the callback will be invoked with a Status and |response|
- // buffer. See media::DemuxerStream::ReadCB for explanation of fields.
+ // Initializes the DemuxerStream. Read() can only be called after the callback
+ // is received. The returned |pipe| will be used to fill out the data section
+ // of the media::DecoderBuffer returned via DemuxerStream::Read(). Only the
+ // config for |type| should be non-null, which is the initial config of the
+ // stream.
+ Initialize() => (Type type,
+ handle<data_pipe_consumer> pipe,
+ AudioDecoderConfig? audio_config,
+ VideoDecoderConfig? video_config);
+
+ // Requests a MediaDecoderBuffer from this stream for decoding and rendering.
+ // See media::DemuxerStream::ReadCB for a general explanation of the fields.
//
- // TODO(tim): Remove this method in favor of initializing the
- // DemuxerStreamClient with a DataPipeConsumerHandle once we have a framed
- // DataPipe that we can serialize [|status| | response|]* over directly.
- Read() => (Status status, MediaDecoderBuffer response);
-};
-
-interface DemuxerStreamClient {
- // Informs the client that the stream is ready for reading. If |pipe| is
- // present, it means the client should read
- //
- // [ |DemuxerStream::Status| |MediaDecoderBuffer| ]
+ // Notes on the callback:
+ // - If |status| is OK, |buffer| should be non-null and clients must fill out
+ // the data section of the returned media::DecoderBuffer by reading from
+ // the |pipe| provided during Initialize().
+ // - If |status| is ABORTED, all other fields should be null.
+ // - If |status| is CONFIG_CHANGED, the config for the stream type should be
+ // non-null.
//
- // payloads from the DataPipe directly. If |pipe| is NULL, it means the
- // client needs to use DemuxerStream::Read() directly to obtain buffers.
- OnStreamReady(handle<data_pipe_consumer>? pipe);
-
- // A new AudioDecoderConfig is available. Will be sent by the DemuxerStream
- // whenever a DemuxerStream::STATUS_CONFIG_CHANGED is observed (either
- // in a Read() callback or over the DataPipe).
- OnAudioDecoderConfigChanged(AudioDecoderConfig config);
-
- // A new VideoDecoderConfig is available. Will be sent by the DemuxerStream
- // whenever a DemuxerStream::STATUS_CONFIG_CHANGED is observed (either
- // in a Read() callback or over the DataPipe).
- OnVideoDecoderConfigChanged(VideoDecoderConfig config);
+ // TODO(dalecurtis): Remove this method in favor of serializing everything
+ // into the DataPipe given to Initialize() once DataPipe supports framed data
+ // in a nicer fashion.
+ Read() => (Status status,
+ MediaDecoderBuffer? buffer,
+ AudioDecoderConfig? audio_config,
+ VideoDecoderConfig? video_config);
};
diff --git a/chromium/media/mojo/interfaces/media_renderer.mojom b/chromium/media/mojo/interfaces/media_renderer.mojom
index 6b7e86e149b..616e6770f72 100644
--- a/chromium/media/mojo/interfaces/media_renderer.mojom
+++ b/chromium/media/mojo/interfaces/media_renderer.mojom
@@ -7,13 +7,14 @@ module mojo;
import "media/mojo/interfaces/demuxer_stream.mojom";
import "media/mojo/interfaces/media_types.mojom";
-[Client=MediaRendererClient]
interface MediaRenderer {
// Initializes the Renderer with one or both of an audio and video stream,
// calling back upon completion.
// NOTE: If an error occurs, MediaRendererClient::OnError() will be called
// before the callback is executed.
- Initialize(DemuxerStream? audio, DemuxerStream? video) => ();
+ Initialize(MediaRendererClient client,
+ DemuxerStream? audio,
+ DemuxerStream? video) => ();
// Discards any buffered data, executing callback when completed.
// NOTE: If an error occurs, MediaRendererClient::OnError() can be called
@@ -24,7 +25,7 @@ interface MediaRenderer {
StartPlayingFrom(int64 time_usec);
// Updates the current playback rate. The default playback rate should be 1.
- SetPlaybackRate(float playback_rate);
+ SetPlaybackRate(double playback_rate);
// Sets the output volume. The default volume should be 1.
SetVolume(float volume);
diff --git a/chromium/media/mojo/interfaces/media_types.mojom b/chromium/media/mojo/interfaces/media_types.mojom
index 0745cb520fc..f23da86d7ab 100644
--- a/chromium/media/mojo/interfaces/media_types.mojom
+++ b/chromium/media/mojo/interfaces/media_types.mojom
@@ -4,17 +4,17 @@
module mojo;
-import "mojo/services/public/interfaces/geometry/geometry.mojom";
+import "ui/mojo/geometry/geometry.mojom";
// See media/base/buffering_state.h for descriptions.
-// Kept in sync with media::BufferingState via COMPILE_ASSERTs.
+// Kept in sync with media::BufferingState via static_asserts.
enum BufferingState {
HAVE_NOTHING,
HAVE_ENOUGH,
};
// See media/base/audio_decoder_config.h for descriptions.
-// Kept in sync with media::AudioCodec via COMPILE_ASSERTs.
+// Kept in sync with media::AudioCodec via static_asserts.
enum AudioCodec {
UNKNOWN = 0,
AAC = 1,
@@ -31,11 +31,12 @@ enum AudioCodec {
Opus = 12,
// EAC3 = 13,
PCM_ALAW = 14,
- MAX = PCM_ALAW,
+ ALAC = 15,
+ MAX = ALAC,
};
// See media/base/channel_layout.h for descriptions.
-// Kept in sync with media::ChannelLayout via COMPILE_ASSERTs.
+// Kept in sync with media::ChannelLayout via static_asserts.
// TODO(tim): The bindings generators will always prepend the enum name, should
// mojom therefore allow enum values starting with numbers?
enum ChannelLayout {
@@ -75,7 +76,7 @@ enum ChannelLayout {
};
// See media/base/sample_format.h for descriptions.
-// Kept in sync with media::SampleFormat via COMPILE_ASSERTs.
+// Kept in sync with media::SampleFormat via static_asserts.
enum SampleFormat {
UNKNOWN = 0,
U8,
@@ -84,11 +85,12 @@ enum SampleFormat {
F32,
PlanarS16,
PlanarF32,
- Max = PlanarF32,
+ PlanarS32,
+ Max = PlanarS32,
};
// See media/base/video_frame.h for descriptions.
-// Kept in sync with media::VideoFrame::Format via COMPILE_ASSERTs.
+// Kept in sync with media::VideoFrame::Format via static_asserts.
enum VideoFormat {
UNKNOWN = 0,
YV12,
@@ -100,11 +102,13 @@ enum VideoFormat {
YV12J,
NV12,
YV24,
- FORMAT_MAX = YV24,
+ ARGB,
+ YV12HD,
+ FORMAT_MAX = YV12HD,
};
// See media/base/video_decoder_config.h for descriptions.
-// Kept in sync with media::VideoCodec via COMPILE_ASSERTs.
+// Kept in sync with media::VideoCodec via static_asserts.
enum VideoCodec {
UNKNOWN = 0,
H264,
@@ -118,7 +122,7 @@ enum VideoCodec {
};
// See media/base/video_decoder_config.h for descriptions.
-// Kept in sync with media::VideoCodecProfile via COMPILE_ASSERTs.
+// Kept in sync with media::VideoCodecProfile via static_asserts.
enum VideoCodecProfile {
VIDEO_CODEC_PROFILE_UNKNOWN = -1,
VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
@@ -154,6 +158,7 @@ struct AudioDecoderConfig {
array<uint8>? extra_data;
int64 seek_preroll_usec;
int32 codec_delay;
+ bool is_encrypted;
};
// This defines a mojo transport format for media::VideoDecoderConfig.
@@ -169,30 +174,46 @@ struct VideoDecoderConfig {
bool is_encrypted;
};
+// This defines a mojo transport format for media::SubsampleEntry.
+// See media/base/decrypt_config.h for descriptions.
+struct SubsampleEntry {
+ uint32 clear_bytes;
+ uint32 cypher_bytes;
+};
+
+// This defines a mojo transport format for media::DecryptConfig.
+// See media/base/decrypt_config.h for descriptions.
+struct DecryptConfig {
+ string key_id;
+ string iv;
+ array<SubsampleEntry> subsamples;
+};
+
// This defines a mojo transport format for media::DecoderBuffer.
struct MediaDecoderBuffer {
// See media/base/buffers.h for details.
int64 timestamp_usec;
int64 duration_usec;
- // The number of bytes in |data|.
+ // The number of bytes present in this buffer. The data is not serialized
+ // along with this structure and must be read from a separate DataPipe.
uint32 data_size;
+ // Indicates whether or not this buffer is a random access point.
+ bool is_key_frame;
+
// This is backed by an std::vector and results in a few copies.
// Into the vector, onto and off the MessagePipe, back into a vector.
array<uint8>? side_data;
uint32 side_data_size;
+ // DecryptConfig for a encrypted buffer. NULL if the buffer is not encrypted.
+ DecryptConfig? decrypt_config;
+
// These fields indicate the amount of data to discard after decoding.
int64 front_discard_usec;
int64 back_discard_usec;
// Indicates this buffer is part of a splice around |splice_timestamp_usec|.
int64 splice_timestamp_usec;
-
- // The payload. Invalid handle indicates an end-of-stream (EOS) buffer.
- // TODO(tim): This currently results in allocating a new, largeish DataPipe
- // for each buffer. Remove this once framed data pipes exist, but using this
- // for now for prototyping audio.
- handle<data_pipe_consumer>? data;
};
diff --git a/chromium/media/mojo/interfaces/mojo_bindings.gyp b/chromium/media/mojo/interfaces/mojo_bindings.gyp
new file mode 100644
index 00000000000..44570019e11
--- /dev/null
+++ b/chromium/media/mojo/interfaces/mojo_bindings.gyp
@@ -0,0 +1,30 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ # GN version: //media/mojo/interfaces
+ 'target_name': 'platform_verification_mojo_bindings',
+ 'type': 'none',
+ 'sources': [
+ 'platform_verification.mojom',
+ ],
+ 'includes': [ '../../../third_party/mojo/mojom_bindings_generator.gypi' ],
+ },
+ {
+ 'target_name': 'platform_verification_api',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'platform_verification_mojo_bindings',
+ '../../../mojo/mojo_base.gyp:mojo_application_bindings',
+ '../../../mojo/mojo_base.gyp:mojo_environment_chromium',
+ '../../../third_party/mojo/mojo_public.gyp:mojo_cpp_bindings',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/media/mojo/interfaces/platform_verification.mojom.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/media/mojo/interfaces/platform_verification.mojom.h',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/mojo/interfaces/platform_verification.mojom b/chromium/media/mojo/interfaces/platform_verification.mojom
new file mode 100644
index 00000000000..e52ac952ff2
--- /dev/null
+++ b/chromium/media/mojo/interfaces/platform_verification.mojom
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(xhwang): Move this out of media if it is needed anywhere else.
+module media.interfaces;
+
+// Allows authorized services to verify that the underlying platform is trusted.
+// An example of a trusted platform is a Chrome OS device in verified boot mode.
+// This can be used for protected content playback.
+//
+// Input parameters:
+// - |service_id|: the service ID for the |challenge|.
+// - |challenge|: the challenge data.
+//
+// Output parameters:
+// - |success|: whether the platform is successfully verified. If true/false the
+// following 3 parameters should be non-empty/empty.
+// - |signed_data|: the data signed by the platform.
+// - |signed_data_signature|: the signature of the signed data block.
+// - |platform_key_certificate|: the device specific certificate for the
+// requested service.
+interface PlatformVerification {
+ ChallengePlatform(string service_id, string challenge) =>
+ (bool success,
+ string? signed_data,
+ string? signed_data_signature,
+ string? platform_key_certificate);
+};
diff --git a/chromium/media/mojo/scripts/run_mojo_media_renderer.py b/chromium/media/mojo/scripts/run_mojo_media_renderer.py
new file mode 100755
index 00000000000..66d3217d8e3
--- /dev/null
+++ b/chromium/media/mojo/scripts/run_mojo_media_renderer.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# The script launches mojo_shell to test the mojo media renderer.
+
+import argparse
+import os
+import subprocess
+import sys
+
+
+root_path = os.path.realpath(
+ os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+
+
+def _BuildShellCommand(args):
+ sdk_version = subprocess.check_output(["cat",
+ "third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
+ build_dir = os.path.join(root_path, args.build_dir)
+
+ shell_command = [os.path.join(build_dir, "mojo_shell")]
+
+ options = ["--enable-mojo-media-renderer"]
+ if args.verbose:
+ options.append("--vmodule=pipeline*=3,*renderer_impl*=3,"
+ "*mojo_demuxer*=3,mojo*service=3")
+
+ full_command = shell_command + options + [args.url]
+
+ if args.verbose:
+ print full_command
+
+ return full_command
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="View a URL with HTMLViewer with mojo media renderer. "
+ "You must have built //mojo, //components/html_viewer, "
+ "//mojo/services/network and //media/mojo/services first.")
+ parser.add_argument(
+ "--build-dir",
+ help="Path to the dir containing the linux-x64 binaries relative to the "
+ "repo root (default: %(default)s)",
+ default="out/Release")
+ parser.add_argument("--verbose", help="Increase output verbosity.",
+ action="store_true")
+ parser.add_argument("url", help="The URL to be viewed")
+
+ args = parser.parse_args()
+ return subprocess.call(_BuildShellCommand(args))
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/chromium/media/mojo/services/BUILD.gn b/chromium/media/mojo/services/BUILD.gn
index 018745cb618..779dadfe2c8 100644
--- a/chromium/media/mojo/services/BUILD.gn
+++ b/chromium/media/mojo/services/BUILD.gn
@@ -2,9 +2,29 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# Things needed by multiple targets, like renderer_impl and renderer_app.
-# GYP version: media/media.gyp:media_mojo_lib
-source_set("lib") {
+import("//media/media_options.gni")
+import("//testing/test.gni")
+import("//third_party/mojo/src/mojo/public/mojo_application.gni")
+
+# Target naming conventions:
+# - converters: C++/Mojo type converters.
+# - proxy: C++ implementations supported by mojo services.
+# - service: Mojo interface implementations.
+# - unittests: Unit tests for a particular class/file.
+# - test: Tests for a particular app, e.g. media.
+
+config("enable_media_mojo_renderer_config") {
+ if (enable_media_mojo_renderer) {
+ defines = [ "ENABLE_MEDIA_MOJO_RENDERER" ]
+ }
+}
+
+source_set("converters") {
+ sources = [
+ "media_type_converters.cc",
+ "media_type_converters.h",
+ ]
+
deps = [
"//base",
"//media",
@@ -12,124 +32,179 @@ source_set("lib") {
"//mojo/common",
"//mojo/converters/geometry",
"//mojo/environment:chromium",
- "//mojo/public/c/system:for_component",
- "//mojo/services/public/interfaces/geometry",
- "//skia",
+ "//third_party/mojo/src/mojo/public/c/system:for_component",
+ "//ui/mojo/geometry:interfaces",
+ ]
+}
+
+source_set("cdm_proxy") {
+ deps = [
+ ":converters",
+ "//base",
+ "//media",
+ "//media/mojo/interfaces",
+ "//mojo/application/public/interfaces",
+ "//mojo/common",
+ "//mojo/environment:chromium",
+ "//third_party/mojo/src/mojo/public/c/system:for_component",
]
sources = [
- "media_type_converters.cc",
- "media_type_converters.h",
- "mojo_demuxer_stream_adapter.cc",
- "mojo_demuxer_stream_adapter.h",
+ "mojo_cdm.cc",
+ "mojo_cdm.h",
+ "mojo_type_trait.h",
]
}
-# mojo media::Renderer proxy (to a renderer_app) implementation.
-source_set("renderer_impl_lib") {
+source_set("cdm_service") {
deps = [
- ":lib",
+ ":converters",
"//base",
"//media",
"//media/mojo/interfaces",
- "//mojo/public/interfaces/application",
+ "//mojo/application/public/interfaces",
"//mojo/common",
"//mojo/environment:chromium",
- "//mojo/public/c/system:for_component",
- "//mojo/public/cpp/application",
+ "//third_party/mojo/src/mojo/public/c/system:for_component",
+ ]
+
+ sources = [
+ "mojo_cdm_promise.cc",
+ "mojo_cdm_promise.h",
+ "mojo_cdm_service.cc",
+ "mojo_cdm_service.h",
+ "mojo_type_trait.h",
]
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+}
+
+# media::Renderer implementation using mojo::MediaRenderer.
+source_set("renderer_proxy") {
sources = [
"mojo_demuxer_stream_impl.cc",
"mojo_demuxer_stream_impl.h",
+ "mojo_renderer_factory.cc",
+ "mojo_renderer_factory.h",
"mojo_renderer_impl.cc",
"mojo_renderer_impl.h",
]
-}
-# mojo media::Renderer application.
-# GYP version: media/media.gyp:mojo_media_renderer_app
-shared_library("renderer_app") {
- output_name = "mojo_media_renderer_app"
+ public_configs = [ ":enable_media_mojo_renderer_config" ]
deps = [
+ ":converters",
"//base",
"//media",
- "//media:shared_memory_support",
"//media/mojo/interfaces",
+ "//mojo/application/public/interfaces",
"//mojo/common",
- "//mojo/application",
- "//mojo/public/c/system:for_shared_library",
- ":lib",
+ "//mojo/environment:chromium",
+ "//third_party/mojo/src/mojo/public/c/system:for_component",
]
+}
+# MediaRenderer service; without a specific config.
+source_set("renderer_service_generic") {
sources = [
"demuxer_stream_provider_shim.cc",
"demuxer_stream_provider_shim.h",
+ "mojo_demuxer_stream_adapter.cc",
+ "mojo_demuxer_stream_adapter.h",
"mojo_renderer_service.cc",
"mojo_renderer_service.h",
"renderer_config.cc",
"renderer_config.h",
- "renderer_config_default.cc",
+ ]
+
+ deps = [
+ ":converters",
+ "//base",
+ "//media",
+ "//media/mojo/interfaces",
+ "//media:shared_memory_support",
+ "//mojo/common",
]
}
-test("mojo_media_lib_unittests") {
+# MediaRenderer service with a default config.
+source_set("renderer_service") {
sources = [
- "media_type_converters_unittest.cc",
+ "renderer_config_default.cc",
]
+ public_configs = [ ":enable_media_mojo_renderer_config" ]
+
deps = [
+ ":renderer_service_generic",
"//base",
- "//base/test:test_support",
"//media",
- "//media/mojo/interfaces",
- "//mojo/edk/system",
- "//mojo/edk/test:run_all_unittests",
- "//mojo/environment:chromium",
- "//testing/gtest",
- ":lib"
+ "//media:shared_memory_support",
]
}
-# GYP version: media/media.gyp:mojo_media_renderer_apptest
-# Not a 'test' because this is loaded via mojo_shell as an app.
-shared_library("renderer_apptest") {
- testonly = true
- output_name = "mojo_media_renderer_apptest"
+test("media_mojo_unittests") {
+ sources = [
+ "media_type_converters_unittest.cc",
+ ]
deps = [
+ ":converters",
"//base",
"//base/test:test_support",
"//media",
- "//media:shared_memory_support",
"//media/mojo/interfaces",
- "//mojo/common",
- "//mojo/application",
+ "//mojo/environment:chromium",
"//testing/gtest",
- ":renderer_impl_lib",
- ":renderer_app",
- ":lib",
- "//mojo/public/c/system:for_shared_library",
+ "//third_party/mojo/src/mojo/edk/system",
+ "//third_party/mojo/src/mojo/edk/test:run_all_unittests",
]
+}
- sources = [
- "renderer_unittest.cc",
- ]
+if (!is_component_build) {
+ mojo_native_application("media") {
+ sources = [
+ "mojo_media_application.cc",
+ ]
+
+ deps = [
+ ":renderer_service",
+ "//mojo/application",
+ "//third_party/mojo/src/mojo/public/c/system:for_shared_library",
+ ]
+ }
+
+ # Note, this 'test' must be loaded via mojo_shell as an app:
+ #
+ # out/Debug/mojo_shell mojo:media_test
+ #
+ mojo_native_application("media_test") {
+ testonly = true
+
+ deps = [
+ "//media/test:mojo_pipeline_integration_tests",
+ ]
+ }
}
group("services") {
deps = [
- ":lib",
- ":renderer_impl_lib",
- ":renderer_app",
+ ":cdm_proxy",
+ ":renderer_proxy",
]
+
+ if (!is_component_build) {
+ deps += [ ":media" ]
+ }
}
group("tests") {
testonly = true
deps = [
- ":mojo_media_lib_unittests",
- ":renderer_apptest",
+ ":media_mojo_unittests",
]
+
+ if (!is_component_build) {
+ deps += [ ":media_test" ]
+ }
}
diff --git a/chromium/media/mojo/services/demuxer_stream_provider_shim.cc b/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
index 6620e6ae665..c3dbc2c7b82 100644
--- a/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
+++ b/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
@@ -48,12 +48,6 @@ DemuxerStream* DemuxerStreamProviderShim::GetStream(DemuxerStream::Type type) {
return nullptr;
}
-DemuxerStreamProvider::Liveness DemuxerStreamProviderShim::GetLiveness() const {
- // TODO(dalecurtis): This should be removed once liveness lives elsewhere, see
- // http://crbug.com/420025
- return DemuxerStreamProvider::LIVENESS_UNKNOWN;
-}
-
void DemuxerStreamProviderShim::OnStreamReady() {
if (++streams_ready_ == streams_.size())
base::ResetAndReturn(&demuxer_ready_cb_).Run();
diff --git a/chromium/media/mojo/services/demuxer_stream_provider_shim.h b/chromium/media/mojo/services/demuxer_stream_provider_shim.h
index e614e11c95f..ec60fe4c88a 100644
--- a/chromium/media/mojo/services/demuxer_stream_provider_shim.h
+++ b/chromium/media/mojo/services/demuxer_stream_provider_shim.h
@@ -26,7 +26,6 @@ class DemuxerStreamProviderShim : public DemuxerStreamProvider {
// DemuxerStreamProvider interface.
DemuxerStream* GetStream(DemuxerStream::Type type) override;
- Liveness GetLiveness() const override;
private:
// Called as each mojo::DemuxerStream becomes ready. Once all streams are
diff --git a/chromium/media/mojo/services/media_type_converters.cc b/chromium/media/mojo/services/media_type_converters.cc
index 95eb1ba0e3d..c0f00adcea8 100644
--- a/chromium/media/mojo/services/media_type_converters.cc
+++ b/chromium/media/mojo/services/media_type_converters.cc
@@ -6,12 +6,14 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/buffering_state.h"
+#include "media/base/cdm_key_information.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/decrypt_config.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/media_keys.h"
#include "media/base/video_decoder_config.h"
#include "media/mojo/interfaces/demuxer_stream.mojom.h"
#include "mojo/converters/geometry/geometry_type_converters.h"
-#include "mojo/public/cpp/system/data_pipe.h"
namespace mojo {
@@ -44,6 +46,7 @@ ASSERT_ENUM_EQ(AudioCodec, kCodec, AUDIO_CODEC_, PCM_S16BE);
ASSERT_ENUM_EQ(AudioCodec, kCodec, AUDIO_CODEC_, PCM_S24BE);
ASSERT_ENUM_EQ(AudioCodec, kCodec, AUDIO_CODEC_, Opus);
ASSERT_ENUM_EQ(AudioCodec, kCodec, AUDIO_CODEC_, PCM_ALAW);
+ASSERT_ENUM_EQ(AudioCodec, kCodec, AUDIO_CODEC_, ALAC);
ASSERT_ENUM_EQ_RAW(AudioCodec, kAudioCodecMax, AUDIO_CODEC_MAX);
// ChannelLayout.
@@ -139,6 +142,8 @@ ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV12J, VIDEO_FORMAT_YV12J);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::NV12, VIDEO_FORMAT_NV12);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV24, VIDEO_FORMAT_YV24);
+ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::ARGB, VIDEO_FORMAT_ARGB);
+ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV12HD, VIDEO_FORMAT_YV12HD);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
VideoFrame::FORMAT_MAX,
VIDEO_FORMAT_FORMAT_MAX);
@@ -208,18 +213,112 @@ ASSERT_ENUM_EQ(VideoCodecProfile,
VIDEO_CODEC_PROFILE_,
VIDEO_CODEC_PROFILE_MAX);
+// CdmException
+#define ASSERT_CDM_EXCEPTION(value) \
+ static_assert( \
+ media::MediaKeys::value == \
+ static_cast<media::MediaKeys::Exception>(CDM_EXCEPTION_##value), \
+ "Mismatched CDM Exception")
+ASSERT_CDM_EXCEPTION(NOT_SUPPORTED_ERROR);
+ASSERT_CDM_EXCEPTION(INVALID_STATE_ERROR);
+ASSERT_CDM_EXCEPTION(INVALID_ACCESS_ERROR);
+ASSERT_CDM_EXCEPTION(QUOTA_EXCEEDED_ERROR);
+ASSERT_CDM_EXCEPTION(UNKNOWN_ERROR);
+ASSERT_CDM_EXCEPTION(CLIENT_ERROR);
+ASSERT_CDM_EXCEPTION(OUTPUT_ERROR);
+
+// CDM Session Type
+#define ASSERT_CDM_SESSION_TYPE(value) \
+ static_assert(media::MediaKeys::value == \
+ static_cast<media::MediaKeys::SessionType>( \
+ ContentDecryptionModule::SESSION_TYPE_##value), \
+ "Mismatched CDM Session Type")
+ASSERT_CDM_SESSION_TYPE(TEMPORARY_SESSION);
+ASSERT_CDM_SESSION_TYPE(PERSISTENT_LICENSE_SESSION);
+ASSERT_CDM_SESSION_TYPE(PERSISTENT_RELEASE_MESSAGE_SESSION);
+
+// CDM InitDataType
+#define ASSERT_CDM_INIT_DATA_TYPE(value) \
+ static_assert(media::EmeInitDataType::value == \
+ static_cast<media::EmeInitDataType>( \
+ ContentDecryptionModule::INIT_DATA_TYPE_##value), \
+ "Mismatched CDM Init Data Type")
+ASSERT_CDM_INIT_DATA_TYPE(UNKNOWN);
+ASSERT_CDM_INIT_DATA_TYPE(WEBM);
+ASSERT_CDM_INIT_DATA_TYPE(CENC);
+ASSERT_CDM_INIT_DATA_TYPE(KEYIDS);
+
+// CDM Key Status
+#define ASSERT_CDM_KEY_STATUS(value) \
+ static_assert(media::CdmKeyInformation::value == \
+ static_cast<media::CdmKeyInformation::KeyStatus>( \
+ CDM_KEY_STATUS_##value), \
+ "Mismatched CDM Key Status")
+ASSERT_CDM_KEY_STATUS(USABLE);
+ASSERT_CDM_KEY_STATUS(INTERNAL_ERROR);
+ASSERT_CDM_KEY_STATUS(EXPIRED);
+ASSERT_CDM_KEY_STATUS(OUTPUT_NOT_ALLOWED);
+
+// CDM Message Type
+#define ASSERT_CDM_MESSAGE_TYPE(value) \
+ static_assert( \
+ media::MediaKeys::value == static_cast<media::MediaKeys::MessageType>( \
+ CDM_MESSAGE_TYPE_##value), \
+ "Mismatched CDM Message Type")
+ASSERT_CDM_MESSAGE_TYPE(LICENSE_REQUEST);
+ASSERT_CDM_MESSAGE_TYPE(LICENSE_RENEWAL);
+ASSERT_CDM_MESSAGE_TYPE(LICENSE_RELEASE);
+
+// static
+SubsampleEntryPtr
+TypeConverter<SubsampleEntryPtr, media::SubsampleEntry>::Convert(
+ const media::SubsampleEntry& input) {
+ SubsampleEntryPtr mojo_subsample_entry(SubsampleEntry::New());
+ mojo_subsample_entry->clear_bytes = input.clear_bytes;
+ mojo_subsample_entry->cypher_bytes = input.cypher_bytes;
+ return mojo_subsample_entry.Pass();
+}
+
+// static
+media::SubsampleEntry
+TypeConverter<media::SubsampleEntry, SubsampleEntryPtr>::Convert(
+ const SubsampleEntryPtr& input) {
+ return media::SubsampleEntry(input->clear_bytes, input->cypher_bytes);
+}
+
+// static
+DecryptConfigPtr TypeConverter<DecryptConfigPtr, media::DecryptConfig>::Convert(
+ const media::DecryptConfig& input) {
+ DecryptConfigPtr mojo_decrypt_config(DecryptConfig::New());
+ mojo_decrypt_config->key_id = input.key_id();
+ mojo_decrypt_config->iv = input.iv();
+ mojo_decrypt_config->subsamples =
+ Array<SubsampleEntryPtr>::From(input.subsamples());
+ return mojo_decrypt_config.Pass();
+}
+
+// static
+scoped_ptr<media::DecryptConfig>
+TypeConverter<scoped_ptr<media::DecryptConfig>, DecryptConfigPtr>::Convert(
+ const DecryptConfigPtr& input) {
+ return make_scoped_ptr(new media::DecryptConfig(
+ input->key_id, input->iv,
+ input->subsamples.To<std::vector<media::SubsampleEntry>>()));
+}
+
// static
MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
scoped_refptr<media::DecoderBuffer> >::Convert(
const scoped_refptr<media::DecoderBuffer>& input) {
- MediaDecoderBufferPtr mojo_buffer(MediaDecoderBuffer::New());
- DCHECK(!mojo_buffer->data.is_valid());
+ DCHECK(input);
+ MediaDecoderBufferPtr mojo_buffer(MediaDecoderBuffer::New());
if (input->end_of_stream())
return mojo_buffer.Pass();
mojo_buffer->timestamp_usec = input->timestamp().InMicroseconds();
mojo_buffer->duration_usec = input->duration().InMicroseconds();
+ mojo_buffer->is_key_frame = input->is_key_frame();
mojo_buffer->data_size = input->data_size();
mojo_buffer->side_data_size = input->side_data_size();
mojo_buffer->front_discard_usec =
@@ -229,28 +328,18 @@ MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
mojo_buffer->splice_timestamp_usec =
input->splice_timestamp().InMicroseconds();
- // TODO(tim): Assuming this is small so allowing extra copies.
- std::vector<uint8> side_data(input->side_data(),
- input->side_data() + input->side_data_size());
+ // Note: The side data is always small, so this copy is okay.
+ std::vector<uint8_t> side_data(input->side_data(),
+ input->side_data() + input->side_data_size());
mojo_buffer->side_data.Swap(&side_data);
- MojoCreateDataPipeOptions options;
- options.struct_size = sizeof(MojoCreateDataPipeOptions);
- options.flags = MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE;
- options.element_num_bytes = 1;
- options.capacity_num_bytes = input->data_size();
- DataPipe data_pipe(options);
- mojo_buffer->data = data_pipe.consumer_handle.Pass();
-
- uint32_t num_bytes = input->data_size();
- // TODO(tim): ALL_OR_NONE isn't really appropriate. Check success?
- // If fails, we'd still return the buffer, but we'd need to HandleWatch
- // to fill the pipe at a later time, which means the de-marshalling code
- // needs to wait for a readable pipe (which it currently doesn't).
- WriteDataRaw(data_pipe.producer_handle.get(),
- input->data(),
- &num_bytes,
- MOJO_WRITE_DATA_FLAG_ALL_OR_NONE);
+ if (input->decrypt_config())
+ mojo_buffer->decrypt_config = DecryptConfig::From(*input->decrypt_config());
+
+ // TODO(dalecurtis): We intentionally do not serialize the data section of
+ // the DecoderBuffer here; this must instead be done by clients via their
+ // own DataPipe. See http://crbug.com/432960
+
return mojo_buffer.Pass();
}
@@ -258,44 +347,38 @@ MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
scoped_refptr<media::DecoderBuffer> TypeConverter<
scoped_refptr<media::DecoderBuffer>, MediaDecoderBufferPtr>::Convert(
const MediaDecoderBufferPtr& input) {
- if (!input->data.is_valid())
+ if (!input->data_size)
return media::DecoderBuffer::CreateEOSBuffer();
- uint32_t num_bytes = 0;
- // TODO(tim): We're assuming that because we always write to the pipe above
- // before sending the MediaDecoderBuffer that the pipe is readable when
- // we get here.
- ReadDataRaw(input->data.get(), NULL, &num_bytes, MOJO_READ_DATA_FLAG_QUERY);
- CHECK_EQ(num_bytes, input->data_size) << "Pipe error converting buffer";
-
- scoped_ptr<uint8[]> data(new uint8[num_bytes]); // Uninitialized.
- ReadDataRaw(input->data.get(), data.get(), &num_bytes,
- MOJO_READ_DATA_FLAG_ALL_OR_NONE);
- CHECK_EQ(num_bytes, input->data_size) << "Pipe error converting buffer";
-
- // TODO(tim): We can't create a media::DecoderBuffer that has side_data
- // without copying data because it wants to ensure alignment. Could we
- // read directly into a pre-padded DecoderBuffer?
- scoped_refptr<media::DecoderBuffer> buffer;
- if (input->side_data_size) {
- buffer = media::DecoderBuffer::CopyFrom(data.get(),
- num_bytes,
- &input->side_data.front(),
- input->side_data_size);
- } else {
- buffer = media::DecoderBuffer::CopyFrom(data.get(), num_bytes);
- }
+ scoped_refptr<media::DecoderBuffer> buffer(
+ new media::DecoderBuffer(input->data_size));
+ if (input->side_data_size)
+ buffer->CopySideDataFrom(&input->side_data.front(), input->side_data_size);
buffer->set_timestamp(
base::TimeDelta::FromMicroseconds(input->timestamp_usec));
buffer->set_duration(
base::TimeDelta::FromMicroseconds(input->duration_usec));
+
+ if (input->is_key_frame)
+ buffer->set_is_key_frame(true);
+
+ if (input->decrypt_config) {
+ buffer->set_decrypt_config(
+ input->decrypt_config.To<scoped_ptr<media::DecryptConfig>>());
+ }
+
media::DecoderBuffer::DiscardPadding discard_padding(
base::TimeDelta::FromMicroseconds(input->front_discard_usec),
base::TimeDelta::FromMicroseconds(input->back_discard_usec));
buffer->set_discard_padding(discard_padding);
buffer->set_splice_timestamp(
base::TimeDelta::FromMicroseconds(input->splice_timestamp_usec));
+
+ // TODO(dalecurtis): We intentionally do not deserialize the data section of
+ // the DecoderBuffer here; this must instead be done by clients via their
+ // own DataPipe. See http://crbug.com/432960
+
return buffer;
}
@@ -311,12 +394,13 @@ TypeConverter<AudioDecoderConfigPtr, media::AudioDecoderConfig>::Convert(
static_cast<ChannelLayout>(input.channel_layout());
config->samples_per_second = input.samples_per_second();
if (input.extra_data()) {
- std::vector<uint8> data(input.extra_data(),
- input.extra_data() + input.extra_data_size());
+ std::vector<uint8_t> data(input.extra_data(),
+ input.extra_data() + input.extra_data_size());
config->extra_data.Swap(&data);
}
config->seek_preroll_usec = input.seek_preroll().InMicroseconds();
config->codec_delay = input.codec_delay();
+ config->is_encrypted = input.is_encrypted();
return config.Pass();
}
@@ -332,7 +416,7 @@ TypeConverter<media::AudioDecoderConfig, AudioDecoderConfigPtr>::Convert(
input->samples_per_second,
input->extra_data.size() ? &input->extra_data.front() : NULL,
input->extra_data.size(),
- false,
+ input->is_encrypted,
false,
base::TimeDelta::FromMicroseconds(input->seek_preroll_usec),
input->codec_delay);
@@ -351,8 +435,8 @@ TypeConverter<VideoDecoderConfigPtr, media::VideoDecoderConfig>::Convert(
config->visible_rect = Rect::From(input.visible_rect());
config->natural_size = Size::From(input.natural_size());
if (input.extra_data()) {
- std::vector<uint8> data(input.extra_data(),
- input.extra_data() + input.extra_data_size());
+ std::vector<uint8_t> data(input.extra_data(),
+ input.extra_data() + input.extra_data_size());
config->extra_data.Swap(&data);
}
config->is_encrypted = input.is_encrypted();
@@ -378,4 +462,28 @@ TypeConverter<media::VideoDecoderConfig, VideoDecoderConfigPtr>::Convert(
return config;
}
+// static
+CdmKeyInformationPtr
+TypeConverter<CdmKeyInformationPtr, media::CdmKeyInformation>::Convert(
+ const media::CdmKeyInformation& input) {
+ CdmKeyInformationPtr info(CdmKeyInformation::New());
+ std::vector<uint8_t> key_id_copy(input.key_id);
+ info->key_id.Swap(&key_id_copy);
+ info->status = static_cast<CdmKeyStatus>(input.status);
+ info->system_code = input.system_code;
+ return info.Pass();
+}
+
+// static
+scoped_ptr<media::CdmKeyInformation> TypeConverter<
+ scoped_ptr<media::CdmKeyInformation>,
+ CdmKeyInformationPtr>::Convert(const CdmKeyInformationPtr& input) {
+ scoped_ptr<media::CdmKeyInformation> info(new media::CdmKeyInformation);
+ info->key_id = input->key_id.storage();
+ info->status =
+ static_cast<media::CdmKeyInformation::KeyStatus>(input->status);
+ info->system_code = input->system_code;
+ return info.Pass();
+}
+
} // namespace mojo
diff --git a/chromium/media/mojo/services/media_type_converters.h b/chromium/media/mojo/services/media_type_converters.h
index 9901865c63e..14a4562701f 100644
--- a/chromium/media/mojo/services/media_type_converters.h
+++ b/chromium/media/mojo/services/media_type_converters.h
@@ -6,19 +6,43 @@
#define MEDIA_MOJO_SERVICES_MEDIA_TYPE_CONVERTERS_H_
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/mojo/interfaces/content_decryption_module.mojom.h"
#include "media/mojo/interfaces/media_types.mojom.h"
namespace media {
class AudioDecoderConfig;
class VideoDecoderConfig;
class DecoderBuffer;
+class DecryptConfig;
+struct SubsampleEntry;
+struct CdmKeyInformation;
}
namespace mojo {
template <>
+struct TypeConverter<SubsampleEntryPtr, media::SubsampleEntry> {
+ static SubsampleEntryPtr Convert(const media::SubsampleEntry& input);
+};
+template <>
+struct TypeConverter<media::SubsampleEntry, SubsampleEntryPtr> {
+ static media::SubsampleEntry Convert(const SubsampleEntryPtr& input);
+};
+
+template <>
+struct TypeConverter<DecryptConfigPtr, media::DecryptConfig> {
+ static DecryptConfigPtr Convert(const media::DecryptConfig& input);
+};
+template <>
+struct TypeConverter<scoped_ptr<media::DecryptConfig>, DecryptConfigPtr> {
+ static scoped_ptr<media::DecryptConfig> Convert(
+ const DecryptConfigPtr& input);
+};
+
+template <>
struct TypeConverter<MediaDecoderBufferPtr,
- scoped_refptr<media::DecoderBuffer> > {
+ scoped_refptr<media::DecoderBuffer>> {
static MediaDecoderBufferPtr Convert(
const scoped_refptr<media::DecoderBuffer>& input);
};
@@ -47,6 +71,17 @@ struct TypeConverter<media::VideoDecoderConfig, VideoDecoderConfigPtr> {
static media::VideoDecoderConfig Convert(const VideoDecoderConfigPtr& input);
};
+template <>
+struct TypeConverter<CdmKeyInformationPtr, media::CdmKeyInformation> {
+ static CdmKeyInformationPtr Convert(const media::CdmKeyInformation& input);
+};
+template <>
+struct TypeConverter<scoped_ptr<media::CdmKeyInformation>,
+ CdmKeyInformationPtr> {
+ static scoped_ptr<media::CdmKeyInformation> Convert(
+ const CdmKeyInformationPtr& input);
+};
+
} // namespace mojo
#endif // MEDIA_MOJO_SERVICES_MEDIA_TYPE_CONVERTERS_H_
diff --git a/chromium/media/mojo/services/media_type_converters_unittest.cc b/chromium/media/mojo/services/media_type_converters_unittest.cc
index 1bf9b854e88..ac05e142da2 100644
--- a/chromium/media/mojo/services/media_type_converters_unittest.cc
+++ b/chromium/media/mojo/services/media_type_converters_unittest.cc
@@ -35,14 +35,20 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
// Compare.
+ // Note: We intentionally do not serialize the data section of the
+ // DecoderBuffer; no need to check the data here.
EXPECT_EQ(kDataSize, result->data_size());
- EXPECT_EQ(0, memcmp(result->data(), kData, kDataSize));
EXPECT_EQ(kSideDataSize, result->side_data_size());
EXPECT_EQ(0, memcmp(result->side_data(), kSideData, kSideDataSize));
EXPECT_EQ(buffer->timestamp(), result->timestamp());
EXPECT_EQ(buffer->duration(), result->duration());
+ EXPECT_EQ(buffer->is_key_frame(), result->is_key_frame());
EXPECT_EQ(buffer->splice_timestamp(), result->splice_timestamp());
EXPECT_EQ(buffer->discard_padding(), result->discard_padding());
+
+ // Both |buffer| and |result| are not encrypted.
+ EXPECT_FALSE(buffer->decrypt_config());
+ EXPECT_FALSE(result->decrypt_config());
}
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EOS) {
@@ -57,6 +63,62 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EOS) {
EXPECT_TRUE(result->end_of_stream());
}
+TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_KeyFrame) {
+ const uint8 kData[] = "hello, world";
+ const int kDataSize = arraysize(kData);
+
+ // Original.
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ buffer->set_is_key_frame(true);
+ EXPECT_TRUE(buffer->is_key_frame());
+
+ // Convert from and back.
+ MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
+
+ // Compare.
+ // Note: We intentionally do not serialize the data section of the
+ // DecoderBuffer; no need to check the data here.
+ EXPECT_EQ(kDataSize, result->data_size());
+ EXPECT_TRUE(result->is_key_frame());
+}
+
+TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EncryptedBuffer) {
+ const uint8 kData[] = "hello, world";
+ const int kDataSize = arraysize(kData);
+ const char kKeyId[] = "00112233445566778899aabbccddeeff";
+ const char kIv[] = "0123456789abcdef";
+
+ std::vector<media::SubsampleEntry> subsamples;
+ subsamples.push_back(media::SubsampleEntry(10, 20));
+ subsamples.push_back(media::SubsampleEntry(30, 40));
+ subsamples.push_back(media::SubsampleEntry(50, 60));
+
+ // Original.
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ buffer->set_decrypt_config(
+ make_scoped_ptr(new media::DecryptConfig(kKeyId, kIv, subsamples)));
+
+ // Convert from and back.
+ MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
+
+ // Compare.
+ // Note: We intentionally do not serialize the data section of the
+ // DecoderBuffer; no need to check the data here.
+ EXPECT_EQ(kDataSize, result->data_size());
+ EXPECT_TRUE(buffer->decrypt_config()->Matches(*result->decrypt_config()));
+
+ // Test empty IV. This is used for clear buffer in an encrypted stream.
+ buffer->set_decrypt_config(make_scoped_ptr(new media::DecryptConfig(
+ kKeyId, "", std::vector<media::SubsampleEntry>())));
+ result = MediaDecoderBuffer::From(buffer).To<scoped_refptr<DecoderBuffer>>();
+ EXPECT_TRUE(buffer->decrypt_config()->Matches(*result->decrypt_config()));
+ EXPECT_TRUE(buffer->decrypt_config()->iv().empty());
+}
+
// TODO(tim): Check other properties.
TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_Normal) {
@@ -95,5 +157,22 @@ TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_NullExtraData) {
EXPECT_TRUE(result.Matches(config));
}
+TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_Encrypted) {
+ media::AudioDecoderConfig config;
+ config.Initialize(media::kCodecAAC,
+ media::kSampleFormatU8,
+ media::CHANNEL_LAYOUT_SURROUND,
+ 48000,
+ NULL,
+ 0,
+ true, // Is encrypted.
+ false,
+ base::TimeDelta(),
+ 0);
+ AudioDecoderConfigPtr ptr(AudioDecoderConfig::From(config));
+ media::AudioDecoderConfig result(ptr.To<media::AudioDecoderConfig>());
+ EXPECT_TRUE(result.Matches(config));
+}
+
} // namespace test
} // namespace mojo
diff --git a/chromium/media/mojo/services/mojo_cdm.cc b/chromium/media/mojo/services/mojo_cdm.cc
new file mode 100644
index 00000000000..e7587115e5f
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm.cc
@@ -0,0 +1,164 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/mojo_cdm.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "media/base/cdm_key_information.h"
+#include "media/base/cdm_promise.h"
+#include "media/mojo/services/media_type_converters.h"
+#include "mojo/application/public/cpp/connect.h"
+#include "mojo/application/public/interfaces/service_provider.mojom.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
+#include "url/gurl.h"
+
+namespace media {
+
+template <typename PromiseType>
+static void RejectPromise(scoped_ptr<PromiseType> promise,
+ mojo::CdmPromiseResultPtr result) {
+ promise->reject(static_cast<MediaKeys::Exception>(result->exception),
+ result->system_code, result->error_message);
+}
+
+MojoCdm::MojoCdm(mojo::ContentDecryptionModulePtr remote_cdm,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb)
+ : remote_cdm_(remote_cdm.Pass()),
+ binding_(this),
+ session_message_cb_(session_message_cb),
+ session_closed_cb_(session_closed_cb),
+ legacy_session_error_cb_(legacy_session_error_cb),
+ session_keys_change_cb_(session_keys_change_cb),
+ session_expiration_update_cb_(session_expiration_update_cb),
+ weak_factory_(this) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(!session_message_cb_.is_null());
+ DCHECK(!session_closed_cb_.is_null());
+ DCHECK(!legacy_session_error_cb_.is_null());
+ DCHECK(!session_keys_change_cb_.is_null());
+ DCHECK(!session_expiration_update_cb_.is_null());
+
+ mojo::ContentDecryptionModuleClientPtr client_ptr;
+ binding_.Bind(GetProxy(&client_ptr));
+ remote_cdm_->SetClient(client_ptr.Pass());
+}
+
+MojoCdm::~MojoCdm() {
+ DVLOG(1) << __FUNCTION__;
+}
+
+void MojoCdm::SetServerCertificate(const std::vector<uint8_t>& certificate,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ remote_cdm_->SetServerCertificate(
+ mojo::Array<uint8_t>::From(certificate),
+ base::Bind(&MojoCdm::OnPromiseResult<>, weak_factory_.GetWeakPtr(),
+ base::Passed(&promise)));
+}
+
+void MojoCdm::CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ remote_cdm_->CreateSessionAndGenerateRequest(
+ static_cast<mojo::ContentDecryptionModule::SessionType>(session_type),
+ static_cast<mojo::ContentDecryptionModule::InitDataType>(init_data_type),
+ mojo::Array<uint8_t>::From(init_data),
+ base::Bind(&MojoCdm::OnPromiseResult<std::string>,
+ weak_factory_.GetWeakPtr(), base::Passed(&promise)));
+}
+
+void MojoCdm::LoadSession(SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ remote_cdm_->LoadSession(
+ static_cast<mojo::ContentDecryptionModule::SessionType>(session_type),
+ session_id,
+ base::Bind(&MojoCdm::OnPromiseResult<std::string>,
+ weak_factory_.GetWeakPtr(), base::Passed(&promise)));
+}
+
+void MojoCdm::UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ remote_cdm_->UpdateSession(
+ session_id, mojo::Array<uint8_t>::From(response),
+ base::Bind(&MojoCdm::OnPromiseResult<>, weak_factory_.GetWeakPtr(),
+ base::Passed(&promise)));
+}
+
+void MojoCdm::CloseSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ remote_cdm_->CloseSession(session_id, base::Bind(&MojoCdm::OnPromiseResult<>,
+ weak_factory_.GetWeakPtr(),
+ base::Passed(&promise)));
+}
+
+void MojoCdm::RemoveSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) {
+ remote_cdm_->RemoveSession(session_id, base::Bind(&MojoCdm::OnPromiseResult<>,
+ weak_factory_.GetWeakPtr(),
+ base::Passed(&promise)));
+}
+
+CdmContext* MojoCdm::GetCdmContext() {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
+void MojoCdm::OnSessionMessage(const mojo::String& session_id,
+ mojo::CdmMessageType message_type,
+ mojo::Array<uint8_t> message,
+ const mojo::String& legacy_destination_url) {
+ GURL verified_gurl = GURL(legacy_destination_url);
+ if (!verified_gurl.is_valid() && !verified_gurl.is_empty()) {
+ DLOG(WARNING) << "SessionMessage destination_url is invalid : "
+ << verified_gurl.possibly_invalid_spec();
+ verified_gurl = GURL::EmptyGURL(); // Replace invalid destination_url.
+ }
+
+ session_message_cb_.Run(session_id,
+ static_cast<MediaKeys::MessageType>(message_type),
+ message.storage(), verified_gurl);
+}
+
+void MojoCdm::OnSessionClosed(const mojo::String& session_id) {
+ session_closed_cb_.Run(session_id);
+}
+
+void MojoCdm::OnLegacySessionError(const mojo::String& session_id,
+ mojo::CdmException exception,
+ uint32_t system_code,
+ const mojo::String& error_message) {
+ legacy_session_error_cb_.Run(session_id,
+ static_cast<MediaKeys::Exception>(exception),
+ system_code, error_message);
+}
+
+void MojoCdm::OnSessionKeysChange(
+ const mojo::String& session_id,
+ bool has_additional_usable_key,
+ mojo::Array<mojo::CdmKeyInformationPtr> keys_info) {
+ media::CdmKeysInfo key_data;
+ key_data.reserve(keys_info.size());
+ for (size_t i = 0; i < keys_info.size(); ++i) {
+ key_data.push_back(
+ keys_info[i].To<scoped_ptr<media::CdmKeyInformation>>().release());
+ }
+ session_keys_change_cb_.Run(session_id, has_additional_usable_key,
+ key_data.Pass());
+}
+
+void MojoCdm::OnSessionExpirationUpdate(const mojo::String& session_id,
+ double new_expiry_time_sec) {
+ session_expiration_update_cb_.Run(
+ session_id, base::Time::FromDoubleT(new_expiry_time_sec));
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_cdm.h b/chromium/media/mojo/services/mojo_cdm.h
new file mode 100644
index 00000000000..68667dab6cc
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm.h
@@ -0,0 +1,106 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_MOJO_CDM_H_
+#define MEDIA_MOJO_SERVICES_MOJO_CDM_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_keys.h"
+#include "media/mojo/interfaces/content_decryption_module.mojom.h"
+#include "media/mojo/services/mojo_type_trait.h"
+
+namespace mojo {
+class ServiceProvider;
+}
+
+namespace media {
+
+// A MediaKeys that proxies to a mojo::ContentDecryptionModule. That
+// mojo::ContentDecryptionModule proxies back to the MojoCdm via the
+// mojo::ContentDecryptionModuleClient interface.
+class MojoCdm : public MediaKeys, public mojo::ContentDecryptionModuleClient {
+ public:
+ // |media_renderer_provider| is a ServiceProvider from a connected
+ // Application that is hosting a mojo::MediaRenderer.
+ MojoCdm(mojo::ContentDecryptionModulePtr remote_cdm,
+ const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb,
+ const LegacySessionErrorCB& legacy_session_error_cb,
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb);
+ ~MojoCdm() final;
+
+ // MediaKeys implementation.
+ void SetServerCertificate(const std::vector<uint8_t>& certificate,
+ scoped_ptr<SimpleCdmPromise> promise) final;
+ void CreateSessionAndGenerateRequest(
+ SessionType session_type,
+ EmeInitDataType init_data_type,
+ const std::vector<uint8_t>& init_data,
+ scoped_ptr<NewSessionCdmPromise> promise) final;
+ void LoadSession(SessionType session_type,
+ const std::string& session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) final;
+ void UpdateSession(const std::string& session_id,
+ const std::vector<uint8_t>& response,
+ scoped_ptr<SimpleCdmPromise> promise) final;
+ void CloseSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) final;
+ void RemoveSession(const std::string& session_id,
+ scoped_ptr<SimpleCdmPromise> promise) final;
+ CdmContext* GetCdmContext() final;
+
+ private:
+ // mojo::ContentDecryptionModuleClient implementation.
+ void OnSessionMessage(const mojo::String& session_id,
+ mojo::CdmMessageType message_type,
+ mojo::Array<uint8_t> message,
+ const mojo::String& legacy_destination_url) final;
+ void OnSessionClosed(const mojo::String& session_id) final;
+ void OnLegacySessionError(const mojo::String& session_id,
+ mojo::CdmException exception,
+ uint32_t system_code,
+ const mojo::String& error_message) final;
+ void OnSessionKeysChange(
+ const mojo::String& session_id,
+ bool has_additional_usable_key,
+ mojo::Array<mojo::CdmKeyInformationPtr> keys_info) final;
+ void OnSessionExpirationUpdate(const mojo::String& session_id,
+ double new_expiry_time_sec) final;
+
+ // Callbacks to handle CDM promises.
+ // We have to inline this method, since MS VS 2013 compiler fails to compile
+ // it when this method is not inlined. It fails with error C2244
+ // "unable to match function definition to an existing declaration".
+ template <typename... T>
+ void OnPromiseResult(scoped_ptr<CdmPromiseTemplate<T...>> promise,
+ mojo::CdmPromiseResultPtr result,
+ typename MojoTypeTrait<T>::MojoType... args) {
+ if (result->success)
+ promise->resolve(args.template To<T>()...); // See ISO C++03 14.2/4.
+ else
+ RejectPromise(promise.Pass(), result.Pass());
+ }
+
+ mojo::ContentDecryptionModulePtr remote_cdm_;
+ mojo::Binding<ContentDecryptionModuleClient> binding_;
+
+ // Callbacks for firing session events.
+ SessionMessageCB session_message_cb_;
+ SessionClosedCB session_closed_cb_;
+ LegacySessionErrorCB legacy_session_error_cb_;
+ SessionKeysChangeCB session_keys_change_cb_;
+ SessionExpirationUpdateCB session_expiration_update_cb_;
+
+ base::WeakPtrFactory<MojoCdm> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MojoCdm);
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_MOJO_CDM_H_
diff --git a/chromium/media/mojo/services/mojo_cdm_promise.cc b/chromium/media/mojo/services/mojo_cdm_promise.cc
new file mode 100644
index 00000000000..4f17f117ead
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm_promise.cc
@@ -0,0 +1,64 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/mojo_cdm_promise.h"
+
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "media/base/decryptor.h"
+#include "media/base/media_keys.h"
+
+namespace media {
+
+static mojo::CdmPromiseResultPtr GetRejectResult(
+ MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message) {
+ mojo::CdmPromiseResultPtr cdm_promise_result(mojo::CdmPromiseResult::New());
+ cdm_promise_result->success = false;
+ cdm_promise_result->exception = static_cast<mojo::CdmException>(exception);
+ cdm_promise_result->system_code = system_code;
+ cdm_promise_result->error_message = error_message;
+ return cdm_promise_result.Pass();
+}
+
+template <typename... T>
+MojoCdmPromise<T...>::MojoCdmPromise(const CallbackType& callback)
+ : callback_(callback) {
+ DCHECK(!callback_.is_null());
+}
+
+template <typename... T>
+MojoCdmPromise<T...>::~MojoCdmPromise() {
+ if (!callback_.is_null())
+ DVLOG(1) << "Promise not resolved before destruction.";
+}
+
+template <typename... T>
+void MojoCdmPromise<T...>::resolve(const T&... result) {
+ MarkPromiseSettled();
+ mojo::CdmPromiseResultPtr cdm_promise_result(mojo::CdmPromiseResult::New());
+ cdm_promise_result->success = true;
+ callback_.Run(cdm_promise_result.Pass(),
+ MojoTypeTrait<T>::MojoType::From(result)...);
+ callback_.reset();
+}
+
+template <typename... T>
+void MojoCdmPromise<T...>::reject(MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message) {
+ MarkPromiseSettled();
+ callback_.Run(GetRejectResult(exception, system_code, error_message),
+ MojoTypeTrait<T>::DefaultValue()...);
+ callback_.reset();
+}
+
+template class MojoCdmPromise<>;
+template class MojoCdmPromise<std::string>;
+
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_cdm_promise.h b/chromium/media/mojo/services/mojo_cdm_promise.h
new file mode 100644
index 00000000000..9e6a507a031
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm_promise.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_MOJO_CDM_PROMISE_H_
+#define MEDIA_MOJO_SERVICES_MOJO_CDM_PROMISE_H_
+
+#include "base/macros.h"
+#include "media/base/cdm_promise.h"
+#include "media/mojo/interfaces/content_decryption_module.mojom.h"
+#include "media/mojo/services/mojo_type_trait.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/callback.h"
+
+namespace media {
+
+// media::CdmPromiseTemplate implementations backed by mojo::Callbacks.
+template <typename... T>
+class MojoCdmPromise : public CdmPromiseTemplate<T...> {
+ public:
+ typedef mojo::Callback<void(mojo::CdmPromiseResultPtr,
+ typename MojoTypeTrait<T>::MojoType...)>
+ CallbackType;
+
+ MojoCdmPromise(const CallbackType& callback);
+ ~MojoCdmPromise() final;
+
+ // CdmPromiseTemplate<> implementation.
+ void resolve(const T&... result) final;
+ void reject(MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message) final;
+
+ private:
+ using media::CdmPromiseTemplate<T...>::MarkPromiseSettled;
+
+ CallbackType callback_;
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_MOJO_CDM_PROMISE_H_
diff --git a/chromium/media/mojo/services/mojo_cdm_service.cc b/chromium/media/mojo/services/mojo_cdm_service.cc
new file mode 100644
index 00000000000..b92d9f8748f
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm_service.cc
@@ -0,0 +1,149 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/mojo_cdm_service.h"
+
+#include "base/bind.h"
+#include "media/base/cdm_key_information.h"
+#include "media/base/key_systems.h"
+#include "media/cdm/aes_decryptor.h"
+#include "media/mojo/services/media_type_converters.h"
+#include "media/mojo/services/mojo_cdm_promise.h"
+#include "mojo/common/common_type_converters.h"
+#include "mojo/common/url_type_converters.h"
+#include "url/gurl.h"
+
+namespace media {
+
+typedef MojoCdmPromise<> SimpleMojoCdmPromise;
+typedef MojoCdmPromise<std::string> NewSessionMojoCdmPromise;
+
+MojoCdmService::MojoCdmService(const mojo::String& key_system)
+ : weak_factory_(this) {
+ base::WeakPtr<MojoCdmService> weak_this = weak_factory_.GetWeakPtr();
+
+ if (CanUseAesDecryptor(key_system)) {
+ // TODO(jrummell): Determine proper origin.
+ cdm_.reset(new AesDecryptor(
+ GURL::EmptyGURL(),
+ base::Bind(&MojoCdmService::OnSessionMessage, weak_this),
+ base::Bind(&MojoCdmService::OnSessionClosed, weak_this),
+ base::Bind(&MojoCdmService::OnSessionKeysChange, weak_this)));
+ }
+
+ // TODO(xhwang): Check key system support in the app.
+ NOTREACHED();
+}
+
+MojoCdmService::~MojoCdmService() {
+}
+
+void MojoCdmService::SetClient(mojo::ContentDecryptionModuleClientPtr client) {
+ client_ = client.Pass();
+}
+
+// mojo::MediaRenderer implementation.
+void MojoCdmService::SetServerCertificate(
+ mojo::Array<uint8_t> certificate_data,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ cdm_->SetServerCertificate(
+ certificate_data.storage(),
+ scoped_ptr<SimpleCdmPromise>(new SimpleMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::CreateSessionAndGenerateRequest(
+ mojo::ContentDecryptionModule::SessionType session_type,
+ mojo::ContentDecryptionModule::InitDataType init_data_type,
+ mojo::Array<uint8_t> init_data,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ callback) {
+ cdm_->CreateSessionAndGenerateRequest(
+ static_cast<MediaKeys::SessionType>(session_type),
+ static_cast<EmeInitDataType>(init_data_type), init_data.storage(),
+ scoped_ptr<NewSessionCdmPromise>(new NewSessionMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::LoadSession(
+ mojo::ContentDecryptionModule::SessionType session_type,
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ callback) {
+ cdm_->LoadSession(
+ static_cast<MediaKeys::SessionType>(session_type),
+ session_id.To<std::string>(),
+ scoped_ptr<NewSessionCdmPromise>(new NewSessionMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::UpdateSession(
+ const mojo::String& session_id,
+ mojo::Array<uint8_t> response,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ cdm_->UpdateSession(
+ session_id.To<std::string>(), response.storage(),
+ scoped_ptr<SimpleCdmPromise>(new SimpleMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::CloseSession(
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ cdm_->CloseSession(
+ session_id.To<std::string>(),
+ scoped_ptr<SimpleCdmPromise>(new SimpleMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::RemoveSession(
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ cdm_->RemoveSession(
+ session_id.To<std::string>(),
+ scoped_ptr<SimpleCdmPromise>(new SimpleMojoCdmPromise(callback)));
+}
+
+void MojoCdmService::GetCdmContext(
+ int32_t cdm_id,
+ mojo::InterfaceRequest<mojo::Decryptor> decryptor) {
+ NOTIMPLEMENTED();
+}
+
+void MojoCdmService::OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8_t>& message,
+ const GURL& legacy_destination_url) {
+ client_->OnSessionMessage(session_id,
+ static_cast<mojo::CdmMessageType>(message_type),
+ mojo::Array<uint8_t>::From(message),
+ mojo::String::From(legacy_destination_url));
+}
+
+void MojoCdmService::OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info) {
+ mojo::Array<mojo::CdmKeyInformationPtr> keys_data;
+ for (const auto& key : keys_info)
+ keys_data.push_back(mojo::CdmKeyInformation::From(*key));
+ client_->OnSessionKeysChange(session_id, has_additional_usable_key,
+ keys_data.Pass());
+}
+
+void MojoCdmService::OnSessionExpirationUpdate(
+ const std::string& session_id,
+ const base::Time& new_expiry_time_sec) {
+ client_->OnSessionExpirationUpdate(session_id,
+ new_expiry_time_sec.ToDoubleT());
+}
+
+void MojoCdmService::OnSessionClosed(const std::string& session_id) {
+ client_->OnSessionClosed(session_id);
+}
+
+void MojoCdmService::OnLegacySessionError(const std::string& session_id,
+ MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message) {
+ client_->OnLegacySessionError(session_id,
+ static_cast<mojo::CdmException>(exception),
+ system_code, error_message);
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_cdm_service.h b/chromium/media/mojo/services/mojo_cdm_service.h
new file mode 100644
index 00000000000..a1b1ea0e1bf
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_cdm_service.h
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_MOJO_CDM_SERVICE_H_
+#define MEDIA_MOJO_SERVICES_MOJO_CDM_SERVICE_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_keys.h"
+#include "media/mojo/interfaces/content_decryption_module.mojom.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
+
+namespace media {
+
+// A mojo::ContentDecryptionModule implementation backed by a media::MediaKeys.
+class MojoCdmService
+ : public mojo::InterfaceImpl<mojo::ContentDecryptionModule> {
+ public:
+ MojoCdmService(const mojo::String& key_system);
+ ~MojoCdmService() final;
+
+ // mojo::ContentDecryptionModule implementation.
+ void SetClient(mojo::ContentDecryptionModuleClientPtr client) final;
+ void SetServerCertificate(
+ mojo::Array<uint8_t> certificate_data,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ void CreateSessionAndGenerateRequest(
+ mojo::ContentDecryptionModule::SessionType session_type,
+ mojo::ContentDecryptionModule::InitDataType init_data_type,
+ mojo::Array<uint8_t> init_data,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ callback) final;
+ void LoadSession(mojo::ContentDecryptionModule::SessionType session_type,
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr,
+ mojo::String)>& callback) final;
+ void UpdateSession(
+ const mojo::String& session_id,
+ mojo::Array<uint8_t> response,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ void CloseSession(
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ void RemoveSession(
+ const mojo::String& session_id,
+ const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ void GetCdmContext(int32_t cdm_id,
+ mojo::InterfaceRequest<mojo::Decryptor> decryptor) final;
+
+ private:
+ // Callbacks for firing session events.
+ void OnSessionMessage(const std::string& session_id,
+ MediaKeys::MessageType message_type,
+ const std::vector<uint8_t>& message,
+ const GURL& legacy_destination_url);
+ void OnSessionKeysChange(const std::string& session_id,
+ bool has_additional_usable_key,
+ CdmKeysInfo keys_info);
+ void OnSessionExpirationUpdate(const std::string& session_id,
+ const base::Time& new_expiry_time);
+ void OnSessionClosed(const std::string& session_id);
+ void OnLegacySessionError(const std::string& session_id,
+ MediaKeys::Exception exception,
+ uint32_t system_code,
+ const std::string& error_message);
+
+ scoped_ptr<MediaKeys> cdm_;
+
+ mojo::ContentDecryptionModuleClientPtr client_;
+
+ base::WeakPtr<MojoCdmService> weak_this_;
+ base::WeakPtrFactory<MojoCdmService> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MojoCdmService);
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_MOJO_CDM_SERVICE_H_
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
index c1d20246629..9817ae3e2d5 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
@@ -8,6 +8,7 @@
#include "base/callback_helpers.h"
#include "media/base/decoder_buffer.h"
#include "media/mojo/services/media_type_converters.h"
+#include "third_party/mojo/src/mojo/public/cpp/system/data_pipe.h"
namespace media {
@@ -19,7 +20,8 @@ MojoDemuxerStreamAdapter::MojoDemuxerStreamAdapter(
type_(DemuxerStream::UNKNOWN),
weak_factory_(this) {
DVLOG(1) << __FUNCTION__;
- demuxer_stream_.set_client(this);
+ demuxer_stream_->Initialize(base::Bind(
+ &MojoDemuxerStreamAdapter::OnStreamReady, weak_factory_.GetWeakPtr()));
}
MojoDemuxerStreamAdapter::~MojoDemuxerStreamAdapter() {
@@ -30,6 +32,8 @@ void MojoDemuxerStreamAdapter::Read(const DemuxerStream::ReadCB& read_cb) {
DVLOG(3) << __FUNCTION__;
// We shouldn't be holding on to a previous callback if a new Read() came in.
DCHECK(read_cb_.is_null());
+
+ DCHECK(stream_pipe_.is_valid());
read_cb_ = read_cb;
demuxer_stream_->Read(base::Bind(&MojoDemuxerStreamAdapter::OnBufferReady,
weak_factory_.GetWeakPtr()));
@@ -37,17 +41,15 @@ void MojoDemuxerStreamAdapter::Read(const DemuxerStream::ReadCB& read_cb) {
AudioDecoderConfig MojoDemuxerStreamAdapter::audio_decoder_config() {
DCHECK_EQ(type_, DemuxerStream::AUDIO);
- DCHECK(!audio_config_queue_.empty());
- return audio_config_queue_.front();
+ return audio_config_;
}
VideoDecoderConfig MojoDemuxerStreamAdapter::video_decoder_config() {
DCHECK_EQ(type_, DemuxerStream::VIDEO);
- DCHECK(!video_config_queue_.empty());
- return video_config_queue_.front();
+ return video_config_;
}
-DemuxerStream::Type MojoDemuxerStreamAdapter::type() {
+DemuxerStream::Type MojoDemuxerStreamAdapter::type() const {
return type_;
}
@@ -64,74 +66,79 @@ VideoRotation MojoDemuxerStreamAdapter::video_rotation() {
return VIDEO_ROTATION_0;
}
+// TODO(xhwang): Pass liveness here.
void MojoDemuxerStreamAdapter::OnStreamReady(
- mojo::ScopedDataPipeConsumerHandle pipe) {
+ mojo::DemuxerStream::Type type,
+ mojo::ScopedDataPipeConsumerHandle pipe,
+ mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config) {
DVLOG(1) << __FUNCTION__;
- // TODO(tim): We don't support pipe streaming yet.
- DCHECK(!pipe.is_valid());
- DCHECK_NE(type_, DemuxerStream::UNKNOWN);
- stream_ready_cb_.Run();
-}
-
-void MojoDemuxerStreamAdapter::OnAudioDecoderConfigChanged(
- mojo::AudioDecoderConfigPtr config) {
- DCHECK(type_ == DemuxerStream::UNKNOWN || type_ == DemuxerStream::AUDIO)
- << type_;
- type_ = DemuxerStream::AUDIO;
-
- audio_config_queue_.push(config.To<AudioDecoderConfig>());
+ DCHECK(pipe.is_valid());
+ DCHECK_EQ(DemuxerStream::UNKNOWN, type_);
- if (!read_cb_.is_null()) {
- read_cb_.Run(DemuxerStream::Status::kConfigChanged, NULL);
- read_cb_.Reset();
- }
-}
-
-void MojoDemuxerStreamAdapter::OnVideoDecoderConfigChanged(
- mojo::VideoDecoderConfigPtr config) {
- DCHECK(type_ == DemuxerStream::UNKNOWN || type_ == DemuxerStream::VIDEO)
- << type_;
- type_ = DemuxerStream::VIDEO;
+ type_ = static_cast<DemuxerStream::Type>(type);
+ stream_pipe_ = pipe.Pass();
+ UpdateConfig(audio_config.Pass(), video_config.Pass());
- video_config_queue_.push(config.To<VideoDecoderConfig>());
-
- if (!read_cb_.is_null()) {
- read_cb_.Run(DemuxerStream::Status::kConfigChanged, NULL);
- read_cb_.Reset();
- }
+ stream_ready_cb_.Run();
}
void MojoDemuxerStreamAdapter::OnBufferReady(
mojo::DemuxerStream::Status status,
- mojo::MediaDecoderBufferPtr buffer) {
+ mojo::MediaDecoderBufferPtr buffer,
+ mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config) {
DVLOG(3) << __FUNCTION__;
DCHECK(!read_cb_.is_null());
DCHECK_NE(type_, DemuxerStream::UNKNOWN);
+ DCHECK(stream_pipe_.is_valid());
- DemuxerStream::Status media_status(
- static_cast<DemuxerStream::Status>(status));
+ if (status == mojo::DemuxerStream::STATUS_CONFIG_CHANGED) {
+ UpdateConfig(audio_config.Pass(), video_config.Pass());
+ base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kConfigChanged, nullptr);
+ return;
+ }
+
+ if (status == mojo::DemuxerStream::STATUS_ABORTED) {
+ base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kAborted, nullptr);
+ return;
+ }
+
+ DCHECK_EQ(status, mojo::DemuxerStream::STATUS_OK);
scoped_refptr<DecoderBuffer> media_buffer(
buffer.To<scoped_refptr<DecoderBuffer>>());
- if (status == mojo::DemuxerStream::STATUS_CONFIG_CHANGED) {
- DCHECK(!media_buffer.get());
-
- // If the configuration queue is empty we need to wait for a config change
- // event before invoking |read_cb_|.
-
- if (type_ == DemuxerStream::AUDIO) {
- audio_config_queue_.pop();
- if (audio_config_queue_.empty())
- return;
- } else if (type_ == DemuxerStream::VIDEO) {
- video_config_queue_.pop();
- if (video_config_queue_.empty())
- return;
- }
+ if (!media_buffer->end_of_stream()) {
+ DCHECK_GT(media_buffer->data_size(), 0);
+
+ // Read the inner data for the DecoderBuffer from our DataPipe.
+ uint32_t num_bytes = media_buffer->data_size();
+ CHECK_EQ(ReadDataRaw(stream_pipe_.get(), media_buffer->writable_data(),
+ &num_bytes, MOJO_READ_DATA_FLAG_ALL_OR_NONE),
+ MOJO_RESULT_OK);
+ CHECK_EQ(num_bytes, static_cast<uint32_t>(media_buffer->data_size()));
}
- read_cb_.Run(media_status, media_buffer);
- read_cb_.Reset();
+ base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kOk, media_buffer);
+}
+
+void MojoDemuxerStreamAdapter::UpdateConfig(
+ mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config) {
+ DCHECK_NE(type_, DemuxerStream::UNKNOWN);
+
+ switch(type_) {
+ case DemuxerStream::AUDIO:
+ DCHECK(audio_config && !video_config);
+ audio_config_ = audio_config.To<AudioDecoderConfig>();
+ break;
+ case DemuxerStream::VIDEO:
+ DCHECK(video_config && !audio_config);
+ video_config_ = video_config.To<VideoDecoderConfig>();
+ break;
+ default:
+ NOTREACHED();
+ }
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
index 213222afd7d..ee7c64de4b3 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
@@ -19,13 +19,12 @@ namespace media {
// that is part of a Pipeline in a remote application. Roughly speaking, it
// takes a mojo::DemuxerStreamPtr and exposes it as a DemuxerStream for use by
// media components.
-class MojoDemuxerStreamAdapter : public DemuxerStream,
- public mojo::DemuxerStreamClient {
+class MojoDemuxerStreamAdapter : public DemuxerStream {
public:
// |demuxer_stream| is connected to the mojo::DemuxerStream that |this| will
// become the client of.
- // |stream_ready_cb| will be invoked when |stream| has fully initialized
- // and |this| is ready for use.
+ // |stream_ready_cb| will be invoked when |demuxer_stream| has fully
+ // initialized and |this| is ready for use.
// NOTE: Illegal to call any methods until |stream_ready_cb| is invoked.
MojoDemuxerStreamAdapter(mojo::DemuxerStreamPtr demuxer_stream,
const base::Closure& stream_ready_cb);
@@ -35,21 +34,26 @@ class MojoDemuxerStreamAdapter : public DemuxerStream,
void Read(const ReadCB& read_cb) override;
AudioDecoderConfig audio_decoder_config() override;
VideoDecoderConfig video_decoder_config() override;
- Type type() override;
+ Type type() const override;
void EnableBitstreamConverter() override;
bool SupportsConfigChanges() override;
VideoRotation video_rotation() override;
- // mojo::DemuxerStreamClient implementation.
- void OnStreamReady(mojo::ScopedDataPipeConsumerHandle pipe) override;
- void OnAudioDecoderConfigChanged(mojo::AudioDecoderConfigPtr config) override;
- void OnVideoDecoderConfigChanged(mojo::VideoDecoderConfigPtr config) override;
-
private:
+ void OnStreamReady(mojo::DemuxerStream::Type type,
+ mojo::ScopedDataPipeConsumerHandle pipe,
+ mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config);
+
// The callback from |demuxer_stream_| that a read operation has completed.
// |read_cb| is a callback from the client who invoked Read() on |this|.
void OnBufferReady(mojo::DemuxerStream::Status status,
- mojo::MediaDecoderBufferPtr buffer);
+ mojo::MediaDecoderBufferPtr buffer,
+ mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config);
+
+ void UpdateConfig(mojo::AudioDecoderConfigPtr audio_config,
+ mojo::VideoDecoderConfigPtr video_config);
// See constructor for descriptions.
mojo::DemuxerStreamPtr demuxer_stream_;
@@ -62,13 +66,15 @@ class MojoDemuxerStreamAdapter : public DemuxerStream,
// on to the caller of Read() until OnAudioDecoderConfigChanged is observed.
DemuxerStream::ReadCB read_cb_;
- // The front of the queue is the current config. We pop when we observe
- // DemuxerStatus::CONFIG_CHANGED.
- std::queue<AudioDecoderConfig> audio_config_queue_;
- std::queue<VideoDecoderConfig> video_config_queue_;
+ // The current config.
+ AudioDecoderConfig audio_config_;
+ VideoDecoderConfig video_config_;
DemuxerStream::Type type_;
+ // DataPipe for deserializing the data section of DecoderBuffers from.
+ mojo::ScopedDataPipeConsumerHandle stream_pipe_;
+
base::WeakPtrFactory<MojoDemuxerStreamAdapter> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MojoDemuxerStreamAdapter);
};
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc b/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
index 861ba2de778..0f91c9361ab 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
@@ -5,13 +5,11 @@
#include "media/mojo/services/mojo_demuxer_stream_impl.h"
#include "base/bind.h"
-#include "base/macros.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/decoder_buffer.h"
#include "media/base/video_decoder_config.h"
-#include "media/mojo/interfaces/demuxer_stream.mojom.h"
#include "media/mojo/services/media_type_converters.h"
-#include "mojo/public/cpp/bindings/interface_impl.h"
-#include "mojo/public/cpp/system/data_pipe.h"
+#include "third_party/mojo/src/mojo/public/cpp/system/data_pipe.h"
namespace media {
@@ -22,50 +20,106 @@ MojoDemuxerStreamImpl::MojoDemuxerStreamImpl(media::DemuxerStream* stream)
MojoDemuxerStreamImpl::~MojoDemuxerStreamImpl() {
}
-void MojoDemuxerStreamImpl::Read(const mojo::Callback<
- void(mojo::DemuxerStream::Status, mojo::MediaDecoderBufferPtr)>& callback) {
+// This is called when our DemuxerStreamClient has connected itself and is
+// ready to receive messages. Send an initial config and notify it that
+// we are now ready for business.
+void MojoDemuxerStreamImpl::Initialize(const InitializeCallback& callback) {
+ DVLOG(2) << __FUNCTION__;
+ MojoCreateDataPipeOptions options;
+ options.struct_size = sizeof(MojoCreateDataPipeOptions);
+ options.flags = MOJO_CREATE_DATA_PIPE_OPTIONS_FLAG_NONE;
+ options.element_num_bytes = 1;
+
+ // Allocate DataPipe sizes based on content type to reduce overhead. If this
+ // is still too burdensome we can adjust for sample rate or resolution.
+ if (stream_->type() == media::DemuxerStream::VIDEO) {
+ // Video can get quite large; at 4K, VP9 delivers packets which are ~1MB in
+ // size; so allow for 50% headroom.
+ options.capacity_num_bytes = 1.5 * (1024 * 1024);
+ } else {
+ // Other types don't require a lot of room, so use a smaller pipe.
+ options.capacity_num_bytes = 512 * 1024;
+ }
+
+ mojo::DataPipe data_pipe(options);
+ stream_pipe_ = data_pipe.producer_handle.Pass();
+
+ // Prepare the initial config.
+ mojo::AudioDecoderConfigPtr audio_config;
+ mojo::VideoDecoderConfigPtr video_config;
+ if (stream_->type() == media::DemuxerStream::AUDIO) {
+ audio_config =
+ mojo::AudioDecoderConfig::From(stream_->audio_decoder_config());
+ } else if (stream_->type() == media::DemuxerStream::VIDEO) {
+ video_config =
+ mojo::VideoDecoderConfig::From(stream_->video_decoder_config());
+ } else {
+ NOTREACHED() << "Unsupported stream type: " << stream_->type();
+ return;
+ }
+
+ callback.Run(static_cast<mojo::DemuxerStream::Type>(stream_->type()),
+ data_pipe.consumer_handle.Pass(), audio_config.Pass(),
+ video_config.Pass());
+}
+
+void MojoDemuxerStreamImpl::Read(const ReadCallback& callback) {
stream_->Read(base::Bind(&MojoDemuxerStreamImpl::OnBufferReady,
- weak_factory_.GetWeakPtr(),
- callback));
+ weak_factory_.GetWeakPtr(), callback));
}
void MojoDemuxerStreamImpl::OnBufferReady(
- const BufferReadyCB& callback,
+ const ReadCallback& callback,
media::DemuxerStream::Status status,
const scoped_refptr<media::DecoderBuffer>& buffer) {
+ mojo::AudioDecoderConfigPtr audio_config;
+ mojo::VideoDecoderConfigPtr video_config;
+
if (status == media::DemuxerStream::kConfigChanged) {
+ DVLOG(2) << __FUNCTION__ << ": ConfigChange!";
// Send the config change so our client can read it once it parses the
// Status obtained via Run() below.
if (stream_->type() == media::DemuxerStream::AUDIO) {
- client()->OnAudioDecoderConfigChanged(
- mojo::AudioDecoderConfig::From(stream_->audio_decoder_config()));
+ audio_config =
+ mojo::AudioDecoderConfig::From(stream_->audio_decoder_config());
} else if (stream_->type() == media::DemuxerStream::VIDEO) {
- client()->OnVideoDecoderConfigChanged(
- mojo::VideoDecoderConfig::From(stream_->video_decoder_config()));
+ video_config =
+ mojo::VideoDecoderConfig::From(stream_->video_decoder_config());
+ } else {
+ NOTREACHED() << "Unsupported config change encountered for type: "
+ << stream_->type();
}
+
+ callback.Run(mojo::DemuxerStream::STATUS_CONFIG_CHANGED,
+ mojo::MediaDecoderBufferPtr(), audio_config.Pass(),
+ video_config.Pass());
+ return;
}
- // TODO(tim): Once using DataPipe, fill via the producer handle and then
- // read more to keep the pipe full.
- callback.Run(static_cast<mojo::DemuxerStream::Status>(status),
- mojo::MediaDecoderBuffer::From(buffer));
-}
+ if (status == media::DemuxerStream::kAborted) {
+ callback.Run(mojo::DemuxerStream::STATUS_ABORTED,
+ mojo::MediaDecoderBufferPtr(), audio_config.Pass(),
+ video_config.Pass());
+ return;
+ }
-void MojoDemuxerStreamImpl::OnConnectionEstablished() {
- // This is called when our DemuxerStreamClient has connected itself and is
- // ready to receive messages. Send an initial config and notify it that
- // we are now ready for business.
- if (stream_->type() == media::DemuxerStream::AUDIO) {
- client()->OnAudioDecoderConfigChanged(
- mojo::AudioDecoderConfig::From(stream_->audio_decoder_config()));
- } else if (stream_->type() == media::DemuxerStream::VIDEO) {
- client()->OnVideoDecoderConfigChanged(
- mojo::VideoDecoderConfig::From(stream_->video_decoder_config()));
+ DCHECK_EQ(status, media::DemuxerStream::kOk);
+ if (!buffer->end_of_stream()) {
+ DCHECK_GT(buffer->data_size(), 0);
+ // Serialize the data section of the DecoderBuffer into our pipe.
+ uint32_t num_bytes = buffer->data_size();
+ CHECK_EQ(WriteDataRaw(stream_pipe_.get(), buffer->data(), &num_bytes,
+ MOJO_READ_DATA_FLAG_ALL_OR_NONE),
+ MOJO_RESULT_OK);
+ CHECK_EQ(num_bytes, static_cast<uint32_t>(buffer->data_size()));
}
- // TODO(tim): Create a DataPipe, hold the producer handle, and pass the
- // consumer handle here.
- client()->OnStreamReady(mojo::ScopedDataPipeConsumerHandle());
+ // TODO(dalecurtis): Once we can write framed data to the DataPipe, fill via
+ // the producer handle and then read more to keep the pipe full. Waiting for
+ // space can be accomplished using an AsyncWaiter.
+ callback.Run(static_cast<mojo::DemuxerStream::Status>(status),
+ mojo::MediaDecoderBuffer::From(buffer), audio_config.Pass(),
+ video_config.Pass());
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_impl.h b/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
index 46333e4c80c..850fc90c299 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
@@ -9,7 +9,7 @@
#include "base/memory/weak_ptr.h"
#include "media/base/demuxer_stream.h"
#include "media/mojo/interfaces/demuxer_stream.mojom.h"
-#include "mojo/public/cpp/bindings/interface_impl.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
namespace media {
class DemuxerStream;
@@ -24,26 +24,21 @@ class MojoDemuxerStreamImpl : public mojo::InterfaceImpl<mojo::DemuxerStream> {
~MojoDemuxerStreamImpl() override;
// mojo::DemuxerStream implementation.
- void Read(const mojo::Callback<void(mojo::DemuxerStream::Status,
- mojo::MediaDecoderBufferPtr)>& callback)
- override;
-
- // mojo::InterfaceImpl overrides.
- void OnConnectionEstablished() override;
+ // InitializeCallback and ReadCallback are defined in mojo::DemuxerStream.
+ void Initialize(const InitializeCallback& callback) override;
+ void Read(const ReadCallback& callback) override;
private:
- // |callback| is the callback that was passed to the initiating Read()
- // call by our client.
- // |status| and |buffer| are the standard media::ReadCB parameters.
- typedef mojo::Callback<void(mojo::DemuxerStream::Status,
- mojo::MediaDecoderBufferPtr)> BufferReadyCB;
- void OnBufferReady(const BufferReadyCB& callback,
+ void OnBufferReady(const ReadCallback& callback,
media::DemuxerStream::Status status,
const scoped_refptr<media::DecoderBuffer>& buffer);
// See constructor. We do not own |stream_|.
media::DemuxerStream* stream_;
+ // DataPipe for serializing the data section of DecoderBuffer into.
+ mojo::ScopedDataPipeProducerHandle stream_pipe_;
+
base::WeakPtrFactory<MojoDemuxerStreamImpl> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MojoDemuxerStreamImpl);
};
diff --git a/chromium/media/mojo/services/mojo_media_application.cc b/chromium/media/mojo/services/mojo_media_application.cc
new file mode 100644
index 00000000000..7aeaabc250b
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_media_application.cc
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/mojo/services/mojo_renderer_service.h"
+#include "mojo/application/application_runner_chromium.h"
+#include "mojo/application/public/cpp/application_connection.h"
+#include "mojo/application/public/cpp/application_delegate.h"
+#include "mojo/application/public/cpp/application_impl.h"
+#include "mojo/application/public/cpp/interface_factory_impl.h"
+#include "third_party/mojo/src/mojo/public/c/system/main.h"
+
+namespace media {
+
+class MojoMediaApplication
+ : public mojo::ApplicationDelegate,
+ public mojo::InterfaceFactory<mojo::MediaRenderer> {
+ public:
+ // mojo::ApplicationDelegate implementation.
+ void Initialize(mojo::ApplicationImpl* app) override {
+ logging::LoggingSettings settings;
+ settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ logging::InitLogging(settings);
+ // Display process ID, thread ID and timestamp in logs.
+ logging::SetLogItems(true, true, true, false);
+ }
+
+ bool ConfigureIncomingConnection(
+ mojo::ApplicationConnection* connection) override {
+ connection->AddService(this);
+ return true;
+ }
+
+ // mojo::InterfaceFactory<mojo::MediaRenderer> implementation.
+ void Create(mojo::ApplicationConnection* connection,
+ mojo::InterfaceRequest<mojo::MediaRenderer> request) override {
+ mojo::BindToRequest(new MojoRendererService(), &request);
+ }
+};
+
+} // namespace media
+
+MojoResult MojoMain(MojoHandle mojo_handle) {
+ mojo::ApplicationRunnerChromium runner(new media::MojoMediaApplication());
+ return runner.Run(mojo_handle);
+}
diff --git a/chromium/media/mojo/services/mojo_renderer_factory.cc b/chromium/media/mojo/services/mojo_renderer_factory.cc
new file mode 100644
index 00000000000..b526907d58d
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_renderer_factory.cc
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/mojo_renderer_factory.h"
+
+#include "base/single_thread_task_runner.h"
+#include "media/mojo/services/mojo_renderer_impl.h"
+
+namespace media {
+
+MojoRendererFactory::MojoRendererFactory(
+ scoped_ptr<ServiceProvider> service_provider)
+ : service_provider_(service_provider.Pass()) {
+ DCHECK(service_provider_.get());
+}
+
+MojoRendererFactory::~MojoRendererFactory() {
+}
+
+scoped_ptr<Renderer> MojoRendererFactory::CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ AudioRendererSink* /* audio_renderer_sink */,
+ VideoRendererSink* /* video_renderer_sink */) {
+ mojo::MediaRendererPtr mojo_media_renderer;
+ service_provider_->ConnectToService(&mojo_media_renderer);
+ return scoped_ptr<Renderer>(
+ new MojoRendererImpl(media_task_runner, mojo_media_renderer.Pass()));
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_factory.h b/chromium/media/mojo/services/mojo_renderer_factory.h
new file mode 100644
index 00000000000..944fae512e2
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_renderer_factory.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_MOJO_RENDERER_FACTORY_H_
+#define MEDIA_MOJO_SERVICES_MOJO_RENDERER_FACTORY_H_
+
+#include "media/base/media_export.h"
+#include "media/base/renderer_factory.h"
+#include "media/mojo/interfaces/media_renderer.mojom.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_ptr.h"
+
+namespace media {
+
+// The default factory class for creating MojoRendererImpl.
+class MEDIA_EXPORT MojoRendererFactory : public RendererFactory {
+ public:
+ // A class that can help get a mojo::MediaRenderer service for
+ // MojoRendererFactory.
+ class ServiceProvider {
+ public:
+ ServiceProvider() {};
+ virtual ~ServiceProvider() {};
+ virtual void ConnectToService(
+ mojo::InterfacePtr<mojo::MediaRenderer>* media_renderer_ptr) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ServiceProvider);
+ };
+
+ explicit MojoRendererFactory(scoped_ptr<ServiceProvider> service_provider);
+ ~MojoRendererFactory() final;
+
+ scoped_ptr<Renderer> CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink) final;
+
+ private:
+ scoped_ptr<ServiceProvider> service_provider_;
+
+ DISALLOW_COPY_AND_ASSIGN(MojoRendererFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_MOJO_RENDERER_FACTORY_H_
diff --git a/chromium/media/mojo/services/mojo_renderer_impl.cc b/chromium/media/mojo/services/mojo_renderer_impl.cc
index 9ed903f0a31..21736af7e1c 100644
--- a/chromium/media/mojo/services/mojo_renderer_impl.cc
+++ b/chromium/media/mojo/services/mojo_renderer_impl.cc
@@ -11,43 +11,42 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream_provider.h"
#include "media/mojo/services/mojo_demuxer_stream_impl.h"
-#include "mojo/public/cpp/application/connect.h"
-#include "mojo/public/cpp/bindings/interface_impl.h"
-#include "mojo/public/interfaces/application/service_provider.mojom.h"
+#include "mojo/application/public/cpp/connect.h"
+#include "mojo/application/public/interfaces/service_provider.mojom.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
namespace media {
MojoRendererImpl::MojoRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- mojo::ServiceProvider* audio_renderer_provider)
+ mojo::MediaRendererPtr remote_media_renderer)
: task_runner_(task_runner),
+ remote_media_renderer_(remote_media_renderer.Pass()),
+ binding_(this),
weak_factory_(this) {
DVLOG(1) << __FUNCTION__;
- // For now we only support audio and there must be a provider.
- DCHECK(audio_renderer_provider);
- mojo::ConnectToService(audio_renderer_provider, &remote_audio_renderer_);
- remote_audio_renderer_.set_client(this);
}
MojoRendererImpl::~MojoRendererImpl() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- // Connection to |remote_audio_renderer_| will error-out here.
+ // Connection to |remote_media_renderer_| will error-out here.
}
+// TODO(xhwang): Support |waiting_for_decryption_key_cb| if needed.
void MojoRendererImpl::Initialize(
DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
+ const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb) {
+ const base::Closure& /* waiting_for_decryption_key_cb */) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(demuxer_stream_provider);
demuxer_stream_provider_ = demuxer_stream_provider;
- // |init_cb| can be called on other thread.
init_cb_ = init_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
@@ -68,17 +67,28 @@ void MojoRendererImpl::Initialize(
if (video)
mojo::BindToProxy(new MojoDemuxerStreamImpl(video), &video_stream);
- remote_audio_renderer_->Initialize(
+ mojo::MediaRendererClientPtr client_ptr;
+ binding_.Bind(GetProxy(&client_ptr));
+ remote_media_renderer_->Initialize(
+ client_ptr.Pass(),
audio_stream.Pass(),
video_stream.Pass(),
BindToCurrentLoop(base::Bind(&MojoRendererImpl::OnInitialized,
weak_factory_.GetWeakPtr())));
}
+void MojoRendererImpl::SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ NOTIMPLEMENTED();
+ cdm_attached_cb.Run(false);
+}
+
void MojoRendererImpl::Flush(const base::Closure& flush_cb) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_audio_renderer_->Flush(flush_cb);
+ remote_media_renderer_->Flush(flush_cb);
}
void MojoRendererImpl::StartPlayingFrom(base::TimeDelta time) {
@@ -90,19 +100,19 @@ void MojoRendererImpl::StartPlayingFrom(base::TimeDelta time) {
time_ = time;
}
- remote_audio_renderer_->StartPlayingFrom(time.InMicroseconds());
+ remote_media_renderer_->StartPlayingFrom(time.InMicroseconds());
}
-void MojoRendererImpl::SetPlaybackRate(float playback_rate) {
+void MojoRendererImpl::SetPlaybackRate(double playback_rate) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_audio_renderer_->SetPlaybackRate(playback_rate);
+ remote_media_renderer_->SetPlaybackRate(playback_rate);
}
void MojoRendererImpl::SetVolume(float volume) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_audio_renderer_->SetVolume(volume);
+ remote_media_renderer_->SetVolume(volume);
}
base::TimeDelta MojoRendererImpl::GetMediaTime() {
@@ -114,7 +124,7 @@ base::TimeDelta MojoRendererImpl::GetMediaTime() {
bool MojoRendererImpl::HasAudio() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(remote_audio_renderer_.get()); // We always bind the renderer.
+ DCHECK(remote_media_renderer_.get()); // We always bind the renderer.
return !!demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO);
}
@@ -124,12 +134,6 @@ bool MojoRendererImpl::HasVideo() {
return !!demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO);
}
-void MojoRendererImpl::SetCdm(MediaKeys* cdm) {
- DVLOG(1) << __FUNCTION__;
- DCHECK(task_runner_->BelongsToCurrentThread());
- NOTIMPLEMENTED();
-}
-
void MojoRendererImpl::OnTimeUpdate(int64_t time_usec, int64_t max_time_usec) {
DVLOG(3) << __FUNCTION__ << ": " << time_usec << ", " << max_time_usec;
@@ -171,7 +175,7 @@ void MojoRendererImpl::OnEnded() {
return;
}
- base::ResetAndReturn(&ended_cb_).Run();
+ ended_cb_.Run();
}
void MojoRendererImpl::OnError() {
@@ -189,15 +193,14 @@ void MojoRendererImpl::OnError() {
if (init_cb_.is_null()) // We have initialized already.
error_cb_.Run(PIPELINE_ERROR_DECODE);
else
- error_cb_.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
+ init_cb_.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
}
void MojoRendererImpl::OnInitialized() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(!init_cb_.is_null());
-
- base::ResetAndReturn(&init_cb_).Run();
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_impl.h b/chromium/media/mojo/services/mojo_renderer_impl.h
index 24a319bc739..7b955a2cc1b 100644
--- a/chromium/media/mojo/services/mojo_renderer_impl.h
+++ b/chromium/media/mojo/services/mojo_renderer_impl.h
@@ -14,11 +14,8 @@ namespace base {
class SingleThreadTaskRunner;
}
-namespace mojo {
-class ServiceProvider;
-}
-
namespace media {
+
class DemuxerStreamProvider;
// A media::Renderer that proxies to a mojo::MediaRenderer. That
@@ -27,33 +24,31 @@ class DemuxerStreamProvider;
//
// MojoRendererImpl implements media::Renderer for use as either an audio
// or video renderer.
-//
-// TODO(tim): Only audio is currently supported. http://crbug.com/410451.
class MojoRendererImpl : public Renderer, public mojo::MediaRendererClient {
public:
// |task_runner| is the TaskRunner on which all methods are invoked.
- // |audio_renderer_provider| is a ServiceProvider from a connected
- // Application that is hosting a mojo::MediaRenderer.
MojoRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- mojo::ServiceProvider* audio_renderer_provider);
+ mojo::MediaRendererPtr remote_media_renderer);
~MojoRendererImpl() override;
// Renderer implementation.
void Initialize(DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
+ const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb) override;
+ const base::Closure& waiting_for_decryption_key_cb) override;
+ void SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) override;
void Flush(const base::Closure& flush_cb) override;
void StartPlayingFrom(base::TimeDelta time) override;
- void SetPlaybackRate(float playback_rate) override;
+ void SetPlaybackRate(double playback_rate) override;
void SetVolume(float volume) override;
base::TimeDelta GetMediaTime() override;
bool HasAudio() override;
bool HasVideo() override;
- void SetCdm(MediaKeys* cdm) override;
// mojo::MediaRendererClient implementation.
void OnTimeUpdate(int64_t time_usec, int64_t max_time_usec) override;
@@ -62,18 +57,19 @@ class MojoRendererImpl : public Renderer, public mojo::MediaRendererClient {
void OnError() override;
private:
- // Called when |remote_audio_renderer_| has finished initializing.
+ // Called when |remote_media_renderer_| has finished initializing.
void OnInitialized();
// Task runner used to execute pipeline tasks.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DemuxerStreamProvider* demuxer_stream_provider_;
- mojo::MediaRendererPtr remote_audio_renderer_;
+ mojo::MediaRendererPtr remote_media_renderer_;
+ mojo::Binding<MediaRendererClient> binding_;
// Callbacks passed to Initialize() that we forward messages from
- // |remote_audio_renderer_| through.
- base::Closure init_cb_;
+ // |remote_media_renderer_| through.
+ PipelineStatusCB init_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
BufferingStateCB buffering_state_cb_;
diff --git a/chromium/media/mojo/services/mojo_renderer_service.cc b/chromium/media/mojo/services/mojo_renderer_service.cc
index 03b5040b2f3..c5dfd77dee6 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.cc
+++ b/chromium/media/mojo/services/mojo_renderer_service.cc
@@ -7,65 +7,30 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
#include "media/base/audio_decoder.h"
#include "media/base/audio_renderer.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/decryptor.h"
#include "media/base/media_log.h"
#include "media/base/video_renderer.h"
-#include "media/filters/audio_renderer_impl.h"
-#include "media/filters/renderer_impl.h"
-#include "media/filters/video_renderer_impl.h"
+#include "media/base/video_renderer_sink.h"
#include "media/mojo/services/demuxer_stream_provider_shim.h"
-#include "media/mojo/services/mojo_demuxer_stream_adapter.h"
#include "media/mojo/services/renderer_config.h"
-#include "mojo/application/application_runner_chromium.h"
-#include "mojo/public/c/system/main.h"
-#include "mojo/public/cpp/application/application_connection.h"
-#include "mojo/public/cpp/application/application_delegate.h"
-#include "mojo/public/cpp/application/interface_factory_impl.h"
+#include "media/renderers/audio_renderer_impl.h"
+#include "media/renderers/renderer_impl.h"
+#include "media/renderers/video_renderer_impl.h"
namespace media {
// Time interval to update media time.
const int kTimeUpdateIntervalMs = 50;
-static void LogMediaSourceError(const scoped_refptr<MediaLog>& media_log,
- const std::string& error) {
- media_log->AddEvent(media_log->CreateMediaSourceErrorEvent(error));
-}
-
-static void PaintNothing(const scoped_refptr<VideoFrame>& frame) {
-}
-
-class MojoRendererApplication
- : public mojo::ApplicationDelegate,
- public mojo::InterfaceFactory<mojo::MediaRenderer> {
- public:
- // mojo::ApplicationDelegate implementation.
- bool ConfigureIncomingConnection(
- mojo::ApplicationConnection* connection) override {
- connection->AddService(this);
- return true;
- }
-
- // mojo::InterfaceFactory<mojo::MediaRenderer> implementation.
- void Create(mojo::ApplicationConnection* connection,
- mojo::InterfaceRequest<mojo::MediaRenderer> request) override {
- mojo::BindToRequest(new MojoRendererService(connection), &request);
- }
-};
-
-static void MojoTrampoline(const mojo::Closure& closure) {
- closure.Run();
-}
-
-MojoRendererService::MojoRendererService(
- mojo::ApplicationConnection* connection)
+MojoRendererService::MojoRendererService()
: state_(STATE_UNINITIALIZED),
last_media_time_usec_(0),
- weak_factory_(this),
- weak_this_(weak_factory_.GetWeakPtr()) {
+ weak_factory_(this) {
+ weak_this_ = weak_factory_.GetWeakPtr();
DVLOG(1) << __FUNCTION__;
scoped_refptr<base::SingleThreadTaskRunner> task_runner(
@@ -73,26 +38,21 @@ MojoRendererService::MojoRendererService(
scoped_refptr<MediaLog> media_log(new MediaLog());
RendererConfig* renderer_config = RendererConfig::Get();
audio_renderer_sink_ = renderer_config->GetAudioRendererSink();
+ video_renderer_sink_ = renderer_config->GetVideoRendererSink();
scoped_ptr<AudioRenderer> audio_renderer(new AudioRendererImpl(
- task_runner,
- audio_renderer_sink_.get(),
- renderer_config->GetAudioDecoders(
- task_runner,
- base::Bind(&LogMediaSourceError, media_log)).Pass(),
- SetDecryptorReadyCB(),
- renderer_config->GetAudioHardwareConfig(),
- media_log));
+ task_runner, audio_renderer_sink_.get(),
+ renderer_config->GetAudioDecoders(task_runner,
+ base::Bind(&MediaLog::AddLogEvent,
+ media_log)).Pass(),
+ renderer_config->GetAudioHardwareConfig(), media_log));
scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
- task_runner,
- renderer_config->GetVideoDecoders(
- task_runner,
- base::Bind(&LogMediaSourceError, media_log)).Pass(),
- SetDecryptorReadyCB(),
- base::Bind(&PaintNothing),
- true,
- media_log));
+ task_runner, video_renderer_sink_.get(),
+ renderer_config->GetVideoDecoders(task_runner,
+ base::Bind(&MediaLog::AddLogEvent,
+ media_log)).Pass(),
+ true, nullptr, media_log));
// Create renderer.
renderer_.reset(new RendererImpl(
@@ -102,13 +62,13 @@ MojoRendererService::MojoRendererService(
MojoRendererService::~MojoRendererService() {
}
-void MojoRendererService::Initialize(mojo::DemuxerStreamPtr audio,
+void MojoRendererService::Initialize(mojo::MediaRendererClientPtr client,
+ mojo::DemuxerStreamPtr audio,
mojo::DemuxerStreamPtr video,
const mojo::Closure& callback) {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, STATE_UNINITIALIZED);
- DCHECK(client());
-
+ client_ = client.Pass();
state_ = STATE_INITIALIZING;
stream_provider_.reset(new DemuxerStreamProviderShim(
audio.Pass(),
@@ -121,8 +81,9 @@ void MojoRendererService::Flush(const mojo::Closure& callback) {
DCHECK_EQ(state_, STATE_PLAYING);
state_ = STATE_FLUSHING;
- time_update_timer_.Reset();
- renderer_->Flush(base::Bind(&MojoTrampoline, callback));
+ CancelPeriodicMediaTimeUpdates();
+ renderer_->Flush(
+ base::Bind(&MojoRendererService::OnFlushCompleted, weak_this_, callback));
}
void MojoRendererService::StartPlayingFrom(int64_t time_delta_usec) {
@@ -132,7 +93,7 @@ void MojoRendererService::StartPlayingFrom(int64_t time_delta_usec) {
SchedulePeriodicMediaTimeUpdates();
}
-void MojoRendererService::SetPlaybackRate(float playback_rate) {
+void MojoRendererService::SetPlaybackRate(double playback_rate) {
DVLOG(2) << __FUNCTION__ << ": " << playback_rate;
DCHECK_EQ(state_, STATE_PLAYING);
renderer_->SetPlaybackRate(playback_rate);
@@ -150,15 +111,19 @@ void MojoRendererService::OnStreamReady(const mojo::Closure& callback) {
base::Bind(
&MojoRendererService::OnRendererInitializeDone, weak_this_, callback),
base::Bind(&MojoRendererService::OnUpdateStatistics, weak_this_),
+ base::Bind(&MojoRendererService::OnBufferingStateChanged, weak_this_),
base::Bind(&MojoRendererService::OnRendererEnded, weak_this_),
base::Bind(&MojoRendererService::OnError, weak_this_),
- base::Bind(&MojoRendererService::OnBufferingStateChanged, weak_this_));
+ base::Bind(base::DoNothing));
}
void MojoRendererService::OnRendererInitializeDone(
- const mojo::Closure& callback) {
+ const mojo::Closure& callback, PipelineStatus status) {
DVLOG(1) << __FUNCTION__;
+ if (status != PIPELINE_OK && state_ != STATE_ERROR)
+ OnError(status);
+
if (state_ == STATE_ERROR) {
renderer_.reset();
} else {
@@ -177,10 +142,15 @@ void MojoRendererService::UpdateMediaTime(bool force) {
if (!force && media_time == last_media_time_usec_)
return;
- client()->OnTimeUpdate(media_time, media_time);
+ client_->OnTimeUpdate(media_time, media_time);
last_media_time_usec_ = media_time;
}
+void MojoRendererService::CancelPeriodicMediaTimeUpdates() {
+ UpdateMediaTime(false);
+ time_update_timer_.Reset();
+}
+
void MojoRendererService::SchedulePeriodicMediaTimeUpdates() {
UpdateMediaTime(true);
time_update_timer_.Start(
@@ -192,25 +162,27 @@ void MojoRendererService::SchedulePeriodicMediaTimeUpdates() {
void MojoRendererService::OnBufferingStateChanged(
BufferingState new_buffering_state) {
DVLOG(2) << __FUNCTION__ << "(" << new_buffering_state << ") ";
- client()->OnBufferingStateChange(
+ client_->OnBufferingStateChange(
static_cast<mojo::BufferingState>(new_buffering_state));
}
void MojoRendererService::OnRendererEnded() {
DVLOG(1) << __FUNCTION__;
- client()->OnEnded();
- time_update_timer_.Reset();
+ CancelPeriodicMediaTimeUpdates();
+ client_->OnEnded();
}
void MojoRendererService::OnError(PipelineStatus error) {
DVLOG(1) << __FUNCTION__;
state_ = STATE_ERROR;
- client()->OnError();
+ client_->OnError();
}
-} // namespace media
-
-MojoResult MojoMain(MojoHandle shell_handle) {
- mojo::ApplicationRunnerChromium runner(new media::MojoRendererApplication);
- return runner.Run(shell_handle);
+void MojoRendererService::OnFlushCompleted(const mojo::Closure& callback) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, STATE_FLUSHING);
+ state_ = STATE_PLAYING;
+ callback.Run();
}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_service.h b/chromium/media/mojo/services/mojo_renderer_service.h
index 35e47cd154d..1b8116829b0 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.h
+++ b/chromium/media/mojo/services/mojo_renderer_service.h
@@ -6,15 +6,17 @@
#define MEDIA_MOJO_SERVICES_MOJO_RENDERER_SERVICE_H_
#include "base/callback.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/timer/timer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/buffering_state.h"
+#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
#include "media/mojo/interfaces/media_renderer.mojom.h"
-#include "mojo/public/cpp/bindings/interface_impl.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
namespace mojo {
class ApplicationConnection;
@@ -24,27 +26,25 @@ namespace media {
class AudioRendererSink;
class DemuxerStreamProviderShim;
-class MojoDemuxerStreamAdapter;
class Renderer;
+class VideoRendererSink;
// A mojo::MediaRenderer implementation that uses media::AudioRenderer to
// decode and render audio to a sink obtained from the ApplicationConnection.
-class MojoRendererService : public mojo::InterfaceImpl<mojo::MediaRenderer> {
+class MEDIA_EXPORT MojoRendererService
+ : NON_EXPORTED_BASE(public mojo::InterfaceImpl<mojo::MediaRenderer>) {
public:
- // |connection| is a pointer to the connection back to our embedder. The
- // embedder should have configured it (via ConfigureOutgoingConnection) to
- // allow |this| to connect to a sink that will receive decoded data ready
- // for playback.
- explicit MojoRendererService(mojo::ApplicationConnection* connection);
+ MojoRendererService();
~MojoRendererService() override;
// mojo::MediaRenderer implementation.
- void Initialize(mojo::DemuxerStreamPtr audio,
+ void Initialize(mojo::MediaRendererClientPtr client,
+ mojo::DemuxerStreamPtr audio,
mojo::DemuxerStreamPtr video,
const mojo::Closure& callback) override;
void Flush(const mojo::Closure& callback) override;
void StartPlayingFrom(int64_t time_delta_usec) override;
- void SetPlaybackRate(float playback_rate) override;
+ void SetPlaybackRate(double playback_rate) override;
void SetVolume(float volume) override;
private:
@@ -56,12 +56,13 @@ class MojoRendererService : public mojo::InterfaceImpl<mojo::MediaRenderer> {
STATE_ERROR
};
- // Called when the MojoDemuxerStreamAdapter is ready to go (has a config,
+ // Called when the DemuxerStreamProviderShim is ready to go (has a config,
// pipe handle, etc) and can be handed off to a renderer for use.
void OnStreamReady(const mojo::Closure& callback);
// Called when |audio_renderer_| initialization has completed.
- void OnRendererInitializeDone(const mojo::Closure& callback);
+ void OnRendererInitializeDone(const mojo::Closure& callback,
+ PipelineStatus status);
// Callback executed by filters to update statistics.
void OnUpdateStatistics(const PipelineStatistics& stats);
@@ -70,6 +71,7 @@ class MojoRendererService : public mojo::InterfaceImpl<mojo::MediaRenderer> {
// if the media time has changed since the last update. If |force| is true,
// the client is notified even if the time is unchanged.
void UpdateMediaTime(bool force);
+ void CancelPeriodicMediaTimeUpdates();
void SchedulePeriodicMediaTimeUpdates();
// Callback executed by audio renderer when buffering state changes.
@@ -82,17 +84,23 @@ class MojoRendererService : public mojo::InterfaceImpl<mojo::MediaRenderer> {
// Callback executed when a runtime error happens.
void OnError(PipelineStatus error);
+ // Callback executed once Flush() completes.
+ void OnFlushCompleted(const mojo::Closure& callback);
+
State state_;
scoped_refptr<AudioRendererSink> audio_renderer_sink_;
+ scoped_ptr<VideoRendererSink> video_renderer_sink_;
scoped_ptr<Renderer> renderer_;
scoped_ptr<DemuxerStreamProviderShim> stream_provider_;
base::RepeatingTimer<MojoRendererService> time_update_timer_;
uint64_t last_media_time_usec_;
- base::WeakPtrFactory<MojoRendererService> weak_factory_;
+ mojo::MediaRendererClientPtr client_;
+
base::WeakPtr<MojoRendererService> weak_this_;
+ base::WeakPtrFactory<MojoRendererService> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MojoRendererService);
};
diff --git a/chromium/media/mojo/services/mojo_type_trait.h b/chromium/media/mojo/services/mojo_type_trait.h
new file mode 100644
index 00000000000..19d87a126e6
--- /dev/null
+++ b/chromium/media/mojo/services/mojo_type_trait.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_MOJO_TYPE_TRAIT_H_
+#define MEDIA_MOJO_SERVICES_MOJO_TYPE_TRAIT_H_
+
+#include "media/base/media_keys.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/array.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/string.h"
+
+namespace media {
+
+// A trait class to help get the corresponding mojo type for a native type.
+template <typename T>
+class MojoTypeTrait {};
+
+template <>
+struct MojoTypeTrait<std::string> {
+ typedef mojo::String MojoType;
+ static MojoType DefaultValue() { return MojoType(); }
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_MOJO_TYPE_TRAIT_H_
diff --git a/chromium/media/mojo/services/renderer_config.cc b/chromium/media/mojo/services/renderer_config.cc
index 311a260dbac..5522da2597f 100644
--- a/chromium/media/mojo/services/renderer_config.cc
+++ b/chromium/media/mojo/services/renderer_config.cc
@@ -34,6 +34,10 @@ scoped_refptr<AudioRendererSink> RendererConfig::GetAudioRendererSink() {
return renderer_config_->GetAudioRendererSink();
}
+scoped_ptr<VideoRendererSink> RendererConfig::GetVideoRendererSink() {
+ return renderer_config_->GetVideoRendererSink();
+}
+
const AudioHardwareConfig& RendererConfig::GetAudioHardwareConfig() {
return renderer_config_->GetAudioHardwareConfig();
}
diff --git a/chromium/media/mojo/services/renderer_config.h b/chromium/media/mojo/services/renderer_config.h
index 08b7e0bbc84..4a531d59eb2 100644
--- a/chromium/media/mojo/services/renderer_config.h
+++ b/chromium/media/mojo/services/renderer_config.h
@@ -13,6 +13,7 @@
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_log.h"
#include "media/base/video_decoder.h"
+#include "media/base/video_renderer_sink.h"
namespace media {
@@ -34,13 +35,13 @@ class PlatformRendererConfig {
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
const LogCB& media_log_cb) = 0;
- // The audio output sink used for rendering audio.
+ // The output sink used for rendering audio or video respectively.
virtual scoped_refptr<AudioRendererSink> GetAudioRendererSink() = 0;
+ virtual scoped_ptr<VideoRendererSink> GetVideoRendererSink() = 0;
// The platform's audio hardware configuration. Note, this must remain
// constant for the lifetime of the PlatformRendererConfig.
virtual const AudioHardwareConfig& GetAudioHardwareConfig() = 0;
-
};
class RendererConfig {
@@ -57,6 +58,7 @@ class RendererConfig {
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
const LogCB& media_log_cb);
scoped_refptr<AudioRendererSink> GetAudioRendererSink();
+ scoped_ptr<VideoRendererSink> GetVideoRendererSink();
const AudioHardwareConfig& GetAudioHardwareConfig();
private:
diff --git a/chromium/media/mojo/services/renderer_config_default.cc b/chromium/media/mojo/services/renderer_config_default.cc
index 76da6db5770..7cee1a7ac6c 100644
--- a/chromium/media/mojo/services/renderer_config_default.cc
+++ b/chromium/media/mojo/services/renderer_config_default.cc
@@ -12,7 +12,7 @@
#include "media/base/media.h"
#include "media/filters/opus_audio_decoder.h"
-#if !defined(OS_ANDROID)
+#if !defined(MEDIA_DISABLE_FFMPEG)
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_video_decoder.h"
#endif
@@ -24,6 +24,20 @@
namespace media {
namespace internal {
+class DummyVideoRendererSink : public VideoRendererSink {
+ public:
+ DummyVideoRendererSink() {}
+ ~DummyVideoRendererSink() override {}
+
+ void Start(RenderCallback* callback) override {}
+ void Stop() override {}
+ void PaintFrameUsingOldRenderingPath(
+ const scoped_refptr<VideoFrame>& frame) override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DummyVideoRendererSink);
+};
+
class DefaultRendererConfig : public PlatformRendererConfig {
public:
DefaultRendererConfig() {
@@ -55,7 +69,7 @@ class DefaultRendererConfig : public PlatformRendererConfig {
const LogCB& media_log_cb) override {
ScopedVector<AudioDecoder> audio_decoders;
-#if !defined(OS_ANDROID)
+#if !defined(MEDIA_DISABLE_FFMPEG)
audio_decoders.push_back(
new FFmpegAudioDecoder(media_task_runner, media_log_cb));
audio_decoders.push_back(new OpusAudioDecoder(media_task_runner));
@@ -74,9 +88,9 @@ class DefaultRendererConfig : public PlatformRendererConfig {
#if !defined(MEDIA_DISABLE_LIBVPX)
video_decoders.push_back(new VpxVideoDecoder(media_task_runner));
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
+#endif
-#if !defined(OS_ANDROID)
+#if !defined(MEDIA_DISABLE_FFMPEG)
video_decoders.push_back(new FFmpegVideoDecoder(media_task_runner));
#endif
@@ -87,6 +101,10 @@ class DefaultRendererConfig : public PlatformRendererConfig {
return new AudioOutputStreamSink();
}
+ scoped_ptr<VideoRendererSink> GetVideoRendererSink() override {
+ return make_scoped_ptr(new DummyVideoRendererSink());
+ }
+
const AudioHardwareConfig& GetAudioHardwareConfig() override {
return *audio_hardware_config_;
}
diff --git a/chromium/media/mojo/services/renderer_unittest.cc b/chromium/media/mojo/services/renderer_unittest.cc
deleted file mode 100644
index a191d726adf..00000000000
--- a/chromium/media/mojo/services/renderer_unittest.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/channel_layout.h"
-#include "media/base/demuxer_stream_provider.h"
-#include "media/base/sample_format.h"
-#include "media/base/video_decoder_config.h"
-#include "media/mojo/services/mojo_renderer_impl.h"
-#include "mojo/public/c/system/main.h"
-#include "mojo/public/cpp/application/application_delegate.h"
-#include "mojo/public/cpp/application/application_impl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// This class is here to give the gtest class access to the
-// mojo::ApplicationImpl so that the tests can connect to other applications.
-class MojoRendererTestHelper : public mojo::ApplicationDelegate {
- public:
- MojoRendererTestHelper() : application_impl_(NULL) {}
- ~MojoRendererTestHelper() override {}
-
- // ApplicationDelegate implementation.
- void Initialize(mojo::ApplicationImpl* app) override {
- application_impl_ = app;
- }
-
- mojo::ApplicationImpl* application_impl() { return application_impl_; }
-
- private:
- mojo::ApplicationImpl* application_impl_;
-
- DISALLOW_COPY_AND_ASSIGN(MojoRendererTestHelper);
-};
-
-// TODO(tim): Reconcile this with mojo apptest framework when ready.
-MojoRendererTestHelper* g_test_delegate = NULL;
-
-// TODO(tim): Make media::FakeDemuxerStream support audio and use that for the
-// DemuxerStream implementation instead.
-class FakeDemuxerStream : public media::DemuxerStreamProvider,
- public media::DemuxerStream {
- public:
- FakeDemuxerStream() {}
- ~FakeDemuxerStream() override {}
-
- // media::Demuxer implementation.
- media::DemuxerStream* GetStream(media::DemuxerStream::Type type) override {
- DCHECK_EQ(media::DemuxerStream::AUDIO, type);
- return this;
- }
- media::DemuxerStreamProvider::Liveness GetLiveness() const override {
- return media::DemuxerStreamProvider::LIVENESS_UNKNOWN;
- }
-
- // media::DemuxerStream implementation.
- void Read(const ReadCB& read_cb) override {}
-
- media::AudioDecoderConfig audio_decoder_config() override {
- media::AudioDecoderConfig config;
- config.Initialize(media::kCodecAAC,
- media::kSampleFormatU8,
- media::CHANNEL_LAYOUT_SURROUND,
- 48000,
- NULL,
- 0,
- false,
- false,
- base::TimeDelta(),
- 0);
- return config;
- }
-
- media::VideoDecoderConfig video_decoder_config() override {
- NOTREACHED();
- return media::VideoDecoderConfig();
- }
-
- media::DemuxerStream::Type type() override {
- return media::DemuxerStream::AUDIO;
- }
-
- void EnableBitstreamConverter() override {}
-
- bool SupportsConfigChanges() override { return true; }
-
- media::VideoRotation video_rotation() override {
- NOTREACHED();
- return media::VIDEO_ROTATION_0;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FakeDemuxerStream);
-};
-
-} // namespace
-
-namespace media {
-
-class MojoRendererTest : public testing::Test {
- public:
- MojoRendererTest() : service_provider_(NULL) {}
-
- void SetUp() override {
- demuxer_stream_provider_.reset(new FakeDemuxerStream());
- service_provider_ =
- g_test_delegate->application_impl()
- ->ConnectToApplication("mojo:media_mojo_renderer_app")
- ->GetServiceProvider();
- }
-
- mojo::ServiceProvider* service_provider() { return service_provider_; }
- DemuxerStreamProvider* stream_provider() {
- return demuxer_stream_provider_.get();
- }
- scoped_refptr<base::SingleThreadTaskRunner> task_runner() {
- return base::MessageLoop::current()->task_runner();
- }
-
- private:
- scoped_ptr<DemuxerStreamProvider> demuxer_stream_provider_;
- mojo::ServiceProvider* service_provider_;
-
- DISALLOW_COPY_AND_ASSIGN(MojoRendererTest);
-};
-
-void ErrorCallback(PipelineStatus* output, PipelineStatus status) {
- *output = status;
-}
-
-// Tests that a MojoRendererImpl can successfully establish communication
-// with a MojoRendererService and set up a MojoDemuxerStream
-// connection. The test also initializes a media::AudioRendererImpl which
-// will error-out expectedly due to lack of support for decoder selection.
-TEST_F(MojoRendererTest, BasicInitialize) {
- MojoRendererImpl rimpl(task_runner(), service_provider());
- PipelineStatus expected_error(PIPELINE_OK);
- rimpl.Initialize(stream_provider(),
- base::MessageLoop::current()->QuitClosure(),
- media::StatisticsCB(),
- base::Closure(),
- base::Bind(&ErrorCallback, &expected_error),
- media::BufferingStateCB());
- base::MessageLoop::current()->Run();
-
- // We expect an error during initialization because MojoRendererService
- // doesn't initialize any decoders, which causes an error.
- EXPECT_EQ(PIPELINE_ERROR_COULD_NOT_RENDER, expected_error);
-}
-
-} // namespace media
-
-MojoResult MojoMain(MojoHandle shell_handle) {
- base::CommandLine::Init(0, NULL);
-#if !defined(COMPONENT_BUILD)
- base::AtExitManager at_exit;
-#endif
-
- // TODO(tim): Reconcile this with apptest framework when it is ready.
- scoped_ptr<mojo::ApplicationDelegate> delegate(new MojoRendererTestHelper());
- g_test_delegate = static_cast<MojoRendererTestHelper*>(delegate.get());
- {
- base::MessageLoop loop;
- mojo::ApplicationImpl impl(
- delegate.get(),
- mojo::MakeScopedHandle(mojo::MessagePipeHandle(shell_handle)));
-
- int argc = 0;
- char** argv = NULL;
- testing::InitGoogleTest(&argc, argv);
- mojo_ignore_result(RUN_ALL_TESTS());
- }
-
- g_test_delegate = NULL;
- delegate.reset();
- return MOJO_RESULT_OK;
-}
diff --git a/chromium/media/ozone/OWNERS b/chromium/media/ozone/OWNERS
new file mode 100644
index 00000000000..4d01d1f0b46
--- /dev/null
+++ b/chromium/media/ozone/OWNERS
@@ -0,0 +1,2 @@
+spang@chromium.org
+alexst@chromium.org
diff --git a/chromium/media/ozone/media_ozone_platform.cc b/chromium/media/ozone/media_ozone_platform.cc
index 804de375802..cda13962820 100644
--- a/chromium/media/ozone/media_ozone_platform.cc
+++ b/chromium/media/ozone/media_ozone_platform.cc
@@ -4,8 +4,8 @@
#include "media/ozone/media_ozone_platform.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "ui/ozone/platform_object.h"
#include "ui/ozone/platform_selection.h"
@@ -17,7 +17,7 @@ class MediaOzonePlatformStub : public MediaOzonePlatform {
public:
MediaOzonePlatformStub() {}
- virtual ~MediaOzonePlatformStub() {}
+ ~MediaOzonePlatformStub() override {}
private:
DISALLOW_COPY_AND_ASSIGN(MediaOzonePlatformStub);
@@ -32,10 +32,18 @@ MediaOzonePlatform* CreateMediaOzonePlatformCaca() {
return new MediaOzonePlatformStub;
}
+MediaOzonePlatform* CreateMediaOzonePlatformCast() {
+ return new MediaOzonePlatformStub;
+}
+
MediaOzonePlatform* CreateMediaOzonePlatformDri() {
return new MediaOzonePlatformStub;
}
+MediaOzonePlatform* CreateMediaOzonePlatformDrm() {
+ return new MediaOzonePlatformStub;
+}
+
MediaOzonePlatform* CreateMediaOzonePlatformEgltest() {
return new MediaOzonePlatformStub;
}
diff --git a/chromium/media/filters/audio_renderer_impl.cc b/chromium/media/renderers/audio_renderer_impl.cc
index f61e3af90c2..59f05b321d0 100644
--- a/chromium/media/filters/audio_renderer_impl.cc
+++ b/chromium/media/renderers/audio_renderer_impl.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/audio_renderer_impl.h"
+#include "media/renderers/audio_renderer_impl.h"
#include <math.h>
@@ -44,18 +44,15 @@ AudioRendererImpl::AudioRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
media::AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
const AudioHardwareConfig& hardware_config,
const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
expecting_config_changes_(false),
sink_(sink),
- audio_buffer_stream_(new AudioBufferStream(task_runner,
- decoders.Pass(),
- set_decryptor_ready_cb,
- media_log)),
+ audio_buffer_stream_(
+ new AudioBufferStream(task_runner, decoders.Pass(), media_log)),
hardware_config_(hardware_config),
- playback_rate_(0),
+ playback_rate_(0.0),
state_(kUninitialized),
buffering_state_(BUFFERING_HAVE_NOTHING),
rendering_(false),
@@ -103,7 +100,7 @@ void AudioRendererImpl::StartRendering_Locked() {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPlaying);
DCHECK(!sink_playing_);
- DCHECK_NE(playback_rate_, 0);
+ DCHECK_NE(playback_rate_, 0.0);
lock_.AssertAcquired();
sink_playing_ = true;
@@ -138,10 +135,11 @@ void AudioRendererImpl::StopRendering_Locked() {
base::AutoUnlock auto_unlock(lock_);
sink_->Pause();
+ stop_rendering_time_ = last_render_time_;
}
void AudioRendererImpl::SetMediaTime(base::TimeDelta time) {
- DVLOG(1) << __FUNCTION__ << "(" << time.InMicroseconds() << ")";
+ DVLOG(1) << __FUNCTION__ << "(" << time << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -150,7 +148,8 @@ void AudioRendererImpl::SetMediaTime(base::TimeDelta time) {
start_timestamp_ = time;
ended_timestamp_ = kInfiniteDuration();
- last_render_ticks_ = base::TimeTicks();
+ last_render_time_ = stop_rendering_time_ = base::TimeTicks();
+ first_packet_timestamp_ = kNoTimestamp();
audio_clock_.reset(new AudioClock(time, audio_parameters_.sample_rate()));
}
@@ -164,20 +163,46 @@ base::TimeDelta AudioRendererImpl::CurrentMediaTime() {
current_media_time = audio_clock_->front_timestamp();
}
- DVLOG(3) << __FUNCTION__ << ": " << current_media_time.InMilliseconds()
- << " ms";
+ DVLOG(2) << __FUNCTION__ << ": " << current_media_time;
return current_media_time;
}
-base::TimeDelta AudioRendererImpl::CurrentMediaTimeForSyncingVideo() {
- DVLOG(2) << __FUNCTION__;
-
+bool AudioRendererImpl::GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) {
base::AutoLock auto_lock(lock_);
- if (last_render_ticks_.is_null())
- return audio_clock_->front_timestamp();
+ if (last_render_time_.is_null() || !stop_rendering_time_.is_null() ||
+ !playback_rate_ || buffering_state_ != BUFFERING_HAVE_ENOUGH ||
+ !sink_playing_) {
+ return false;
+ }
+
+ DCHECK(wall_clock_times->empty());
+ wall_clock_times->reserve(media_timestamps.size());
+ for (const auto& media_timestamp : media_timestamps) {
+ base::TimeDelta base_time;
+ if (media_timestamp < audio_clock_->front_timestamp()) {
+ // See notes about |media_time| values less than |base_time| in TimeSource
+ // header.
+ base_time = audio_clock_->front_timestamp();
+ } else if (media_timestamp > audio_clock_->back_timestamp()) {
+ base_time = audio_clock_->back_timestamp();
+ } else {
+ // No need to estimate time, so return the actual wallclock time.
+ wall_clock_times->push_back(
+ last_render_time_ +
+ audio_clock_->TimeUntilPlayback(media_timestamp));
+ continue;
+ }
- return audio_clock_->TimestampSinceWriting(base::TimeTicks::Now() -
- last_render_ticks_);
+ // In practice, most calls will be estimates given the relatively small
+ // window in which clients can get the actual time.
+ wall_clock_times->push_back(
+ last_render_time_ + audio_clock_->TimeUntilPlayback(base_time) +
+ base::TimeDelta::FromMicroseconds(
+ (media_timestamp - base_time).InMicroseconds() / playback_rate_));
+ }
+ return true;
}
TimeSource* AudioRendererImpl::GetTimeSource() {
@@ -253,12 +278,15 @@ void AudioRendererImpl::StartPlaying() {
AttemptRead_Locked();
}
-void AudioRendererImpl::Initialize(DemuxerStream* stream,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb) {
+void AudioRendererImpl::Initialize(
+ DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stream);
@@ -281,8 +309,9 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
ended_cb_ = ended_cb;
error_cb_ = error_cb;
+ const AudioParameters& hw_params = hardware_config_.GetOutputConfig();
expecting_config_changes_ = stream->SupportsConfigChanges();
- if (!expecting_config_changes_) {
+ if (!expecting_config_changes_ || !hw_params.IsValid()) {
// The actual buffer size is controlled via the size of the AudioBus
// provided to Render(), so just choose something reasonable here for looks.
int buffer_size = stream->audio_decoder_config().samples_per_second() / 100;
@@ -296,8 +325,6 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
buffer_size);
buffer_converter_.reset();
} else {
- // TODO(rileya): Support hardware config changes
- const AudioParameters& hw_params = hardware_config_.GetOutputConfig();
audio_parameters_.Reset(
hw_params.format(),
// Always use the source's channel layout and channel count to avoid
@@ -316,11 +343,9 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
new AudioClock(base::TimeDelta(), audio_parameters_.sample_rate()));
audio_buffer_stream_->Initialize(
- stream,
- false,
- statistics_cb,
- base::Bind(&AudioRendererImpl::OnAudioBufferStreamInitialized,
- weak_factory_.GetWeakPtr()));
+ stream, base::Bind(&AudioRendererImpl::OnAudioBufferStreamInitialized,
+ weak_factory_.GetWeakPtr()),
+ set_decryptor_ready_cb, statistics_cb, waiting_for_decryption_key_cb);
}
void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
@@ -464,6 +489,11 @@ bool AudioRendererImpl::HandleSplicerBuffer_Locked(
algorithm_->EnqueueBuffer(buffer);
}
+ // Store the timestamp of the first packet so we know when to start actual
+ // audio playback.
+ if (first_packet_timestamp_ == kNoTimestamp())
+ first_packet_timestamp_ = buffer->timestamp();
+
switch (state_) {
case kUninitialized:
case kInitializing:
@@ -521,7 +551,7 @@ bool AudioRendererImpl::CanRead_Locked() {
!algorithm_->IsQueueFull();
}
-void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
+void AudioRendererImpl::SetPlaybackRate(double playback_rate) {
DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GE(playback_rate, 0);
@@ -532,7 +562,7 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
// We have two cases here:
// Play: current_playback_rate == 0 && playback_rate != 0
// Pause: current_playback_rate != 0 && playback_rate == 0
- float current_playback_rate = playback_rate_;
+ double current_playback_rate = playback_rate_;
playback_rate_ = playback_rate;
if (!rendering_)
@@ -566,7 +596,12 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
int frames_written = 0;
{
base::AutoLock auto_lock(lock_);
- last_render_ticks_ = base::TimeTicks::Now();
+ last_render_time_ = base::TimeTicks::Now();
+
+ if (!stop_rendering_time_.is_null()) {
+ // TODO(dalecurtis): Use |stop_rendering_time_| to advance the AudioClock.
+ stop_rendering_time_ = base::TimeTicks();
+ }
// Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread.
if (!algorithm_) {
@@ -588,6 +623,30 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
return 0;
}
+ // Delay playback by writing silence if we haven't reached the first
+ // timestamp yet; this can occur if the video starts before the audio.
+ if (algorithm_->frames_buffered() > 0) {
+ DCHECK(first_packet_timestamp_ != kNoTimestamp());
+ const base::TimeDelta play_delay =
+ first_packet_timestamp_ - audio_clock_->back_timestamp();
+ if (play_delay > base::TimeDelta()) {
+ DCHECK_EQ(frames_written, 0);
+ frames_written =
+ std::min(static_cast<int>(play_delay.InSecondsF() *
+ audio_parameters_.sample_rate()),
+ requested_frames);
+ audio_bus->ZeroFramesPartial(0, frames_written);
+ }
+
+ // If there's any space left, actually render the audio; this is where the
+ // aural magic happens.
+ if (frames_written < requested_frames) {
+ frames_written += algorithm_->FillBuffer(
+ audio_bus, frames_written, requested_frames - frames_written,
+ playback_rate_);
+ }
+ }
+
// We use the following conditions to determine end of playback:
// 1) Algorithm can not fill the audio callback buffer
// 2) We received an end of stream buffer
@@ -600,11 +659,7 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
// 3) We are in the kPlaying state
//
// Otherwise the buffer has data we can send to the device.
- if (algorithm_->frames_buffered() > 0) {
- frames_written =
- algorithm_->FillBuffer(audio_bus, requested_frames, playback_rate_);
- }
-
+ //
// Per the TimeSource API the media time should always increase even after
// we've rendered all known audio data. Doing so simplifies scenarios where
// we have other sources of media data that need to be scheduled after audio
diff --git a/chromium/media/filters/audio_renderer_impl.h b/chromium/media/renderers/audio_renderer_impl.h
index 8065b269dac..43d12eabb26 100644
--- a/chromium/media/filters/audio_renderer_impl.h
+++ b/chromium/media/renderers/audio_renderer_impl.h
@@ -16,8 +16,8 @@
// queueing audio data and stretching/shrinking audio data when playback rate !=
// 1.0 or 0.0.
-#ifndef MEDIA_FILTERS_AUDIO_RENDERER_IMPL_H_
-#define MEDIA_FILTERS_AUDIO_RENDERER_IMPL_H_
+#ifndef MEDIA_RENDERERS_AUDIO_RENDERER_IMPL_H_
+#define MEDIA_RENDERERS_AUDIO_RENDERER_IMPL_H_
#include <deque>
@@ -57,33 +57,33 @@ class MEDIA_EXPORT AudioRendererImpl
// |sink| is used as the destination for the rendered audio.
//
// |decoders| contains the AudioDecoders to use when initializing.
- //
- // |set_decryptor_ready_cb| is fired when the audio decryptor is available
- // (only applicable if the stream is encrypted and we have a decryptor).
AudioRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- const AudioHardwareConfig& hardware_params,
+ const AudioHardwareConfig& hardware_config,
const scoped_refptr<MediaLog>& media_log);
~AudioRendererImpl() override;
// TimeSource implementation.
void StartTicking() override;
void StopTicking() override;
- void SetPlaybackRate(float rate) override;
+ void SetPlaybackRate(double rate) override;
void SetMediaTime(base::TimeDelta time) override;
base::TimeDelta CurrentMediaTime() override;
- base::TimeDelta CurrentMediaTimeForSyncingVideo() override;
+ bool GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) override;
// AudioRenderer implementation.
void Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
const StatisticsCB& statistics_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb) override;
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb) override;
TimeSource* GetTimeSource() override;
void Flush(const base::Closure& callback) override;
void StartPlaying() override;
@@ -175,10 +175,6 @@ class MEDIA_EXPORT AudioRendererImpl
// completed.
void DoFlush_Locked();
- // Calls |decoder_|.Reset() and arranges for ResetDecoderDone() to get
- // called when the reset completes.
- void ResetDecoder();
-
// Called when the |decoder_|.Reset() has completed.
void ResetDecoderDone();
@@ -226,7 +222,7 @@ class MEDIA_EXPORT AudioRendererImpl
base::Lock lock_;
// Algorithm for scaling audio.
- float playback_rate_;
+ double playback_rate_;
scoped_ptr<AudioRendererAlgorithm> algorithm_;
// Simple state tracking variable.
@@ -258,7 +254,15 @@ class MEDIA_EXPORT AudioRendererImpl
// Set every Render() and used to provide an interpolated time value to
// CurrentMediaTimeForSyncingVideo().
- base::TimeTicks last_render_ticks_;
+ base::TimeTicks last_render_time_;
+
+ // Set to the value of |last_render_time_| when StopRendering_Locked() is
+ // called for any reason. Cleared by the next successful Render() call.
+ base::TimeTicks stop_rendering_time_;
+
+ // Set upon receipt of the first decoded buffer after a StartPlayingFrom().
+ // Used to determine how long to delay playback.
+ base::TimeDelta first_packet_timestamp_;
// End variables which must be accessed under |lock_|. ----------------------
@@ -270,4 +274,4 @@ class MEDIA_EXPORT AudioRendererImpl
} // namespace media
-#endif // MEDIA_FILTERS_AUDIO_RENDERER_IMPL_H_
+#endif // MEDIA_RENDERERS_AUDIO_RENDERER_IMPL_H_
diff --git a/chromium/media/filters/audio_renderer_impl_unittest.cc b/chromium/media/renderers/audio_renderer_impl_unittest.cc
index 288d27ccb6a..119fb76b464 100644
--- a/chromium/media/filters/audio_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/audio_renderer_impl_unittest.cc
@@ -4,6 +4,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/format_macros.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer_converter.h"
@@ -13,7 +14,7 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
-#include "media/filters/audio_renderer_impl.h"
+#include "media/renderers/audio_renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::base::TimeDelta;
@@ -95,7 +96,6 @@ class AudioRendererImplTest : public ::testing::Test {
renderer_.reset(new AudioRendererImpl(message_loop_.message_loop_proxy(),
sink_.get(),
decoders.Pass(),
- SetDecryptorReadyCB(),
hardware_config_,
new MediaLog()));
}
@@ -113,18 +113,19 @@ class AudioRendererImplTest : public ::testing::Test {
MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
MOCK_METHOD1(OnError, void(PipelineStatus));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
void InitializeRenderer(const PipelineStatusCB& pipeline_status_cb) {
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey()).Times(0);
renderer_->Initialize(
- &demuxer_stream_,
- pipeline_status_cb,
+ &demuxer_stream_, pipeline_status_cb, SetDecryptorReadyCB(),
base::Bind(&AudioRendererImplTest::OnStatistics,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnBufferingStateChange,
base::Unretained(this)),
- base::Bind(&AudioRendererImplTest::OnEnded,
- base::Unretained(this)),
- base::Bind(&AudioRendererImplTest::OnError,
+ base::Bind(&AudioRendererImplTest::OnEnded, base::Unretained(this)),
+ base::Bind(&AudioRendererImplTest::OnError, base::Unretained(this)),
+ base::Bind(&AudioRendererImplTest::OnWaitingForDecryptionKey,
base::Unretained(this)));
}
@@ -190,17 +191,19 @@ class AudioRendererImplTest : public ::testing::Test {
}
void Preroll() {
- Preroll(0, PIPELINE_OK);
+ Preroll(base::TimeDelta(), base::TimeDelta(), PIPELINE_OK);
}
- void Preroll(int timestamp_ms, PipelineStatus expected) {
- SCOPED_TRACE(base::StringPrintf("Preroll(%d, %d)", timestamp_ms, expected));
-
- TimeDelta timestamp = TimeDelta::FromMilliseconds(timestamp_ms);
- next_timestamp_->SetBaseTimestamp(timestamp);
+ void Preroll(base::TimeDelta start_timestamp,
+ base::TimeDelta first_timestamp,
+ PipelineStatus expected) {
+ SCOPED_TRACE(base::StringPrintf("Preroll(%" PRId64 ", %d)",
+ first_timestamp.InMilliseconds(),
+ expected));
+ next_timestamp_->SetBaseTimestamp(first_timestamp);
// Fill entire buffer to complete prerolling.
- renderer_->SetMediaTime(timestamp);
+ renderer_->SetMediaTime(start_timestamp);
renderer_->StartPlaying();
WaitForPendingRead();
EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
@@ -209,7 +212,7 @@ class AudioRendererImplTest : public ::testing::Test {
void StartTicking() {
renderer_->StartTicking();
- renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(1.0);
}
void StopTicking() { renderer_->StopTicking(); }
@@ -516,6 +519,10 @@ TEST_F(AudioRendererImplTest, Underflow_Flush) {
WaitForPendingRead();
StopTicking();
+ // After time stops ticking wall clock times should not be returned.
+ EXPECT_FALSE(
+ renderer_->GetWallClockTimes(std::vector<base::TimeDelta>(1), nullptr));
+
// We shouldn't expect another buffering state change when flushing.
FlushDuringPendingRead();
}
@@ -539,7 +546,9 @@ TEST_F(AudioRendererImplTest, PendingRead_Flush) {
FlushDuringPendingRead();
// Preroll again to a different timestamp and verify it completed normally.
- Preroll(1000, PIPELINE_OK);
+ const base::TimeDelta seek_timestamp =
+ base::TimeDelta::FromMilliseconds(1000);
+ Preroll(seek_timestamp, seek_timestamp, PIPELINE_OK);
}
TEST_F(AudioRendererImplTest, PendingRead_Destroy) {
@@ -642,6 +651,44 @@ TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) {
EXPECT_EQ(timestamp_helper.GetTimestamp(), CurrentMediaTime());
}
+TEST_F(AudioRendererImplTest, RenderingDelayedForEarlyStartTime) {
+ Initialize();
+
+ // Choose a first timestamp a few buffers into the future, which ends halfway
+ // through the desired output buffer; this allows for maximum test coverage.
+ const double kBuffers = 4.5;
+ const base::TimeDelta first_timestamp = base::TimeDelta::FromSecondsD(
+ hardware_config_.GetOutputBufferSize() * kBuffers /
+ hardware_config_.GetOutputSampleRate());
+
+ Preroll(base::TimeDelta(), first_timestamp, PIPELINE_OK);
+ StartTicking();
+
+ // Verify the first few buffers are silent.
+ scoped_ptr<AudioBus> bus =
+ AudioBus::Create(hardware_config_.GetOutputConfig());
+ int frames_read = 0;
+ for (int i = 0; i < std::floor(kBuffers); ++i) {
+ EXPECT_TRUE(sink_->Render(bus.get(), 0, &frames_read));
+ EXPECT_EQ(frames_read, bus->frames());
+ for (int j = 0; j < bus->frames(); ++j)
+ ASSERT_FLOAT_EQ(0.0f, bus->channel(0)[j]);
+ WaitForPendingRead();
+ DeliverRemainingAudio();
+ }
+
+ // Verify the last buffer is half silence and half real data.
+ EXPECT_TRUE(sink_->Render(bus.get(), 0, &frames_read));
+ EXPECT_EQ(frames_read, bus->frames());
+ const int zero_frames =
+ bus->frames() * (kBuffers - static_cast<int>(kBuffers));
+
+ for (int i = 0; i < zero_frames; ++i)
+ ASSERT_FLOAT_EQ(0.0f, bus->channel(0)[i]);
+ for (int i = zero_frames; i < bus->frames(); ++i)
+ ASSERT_NE(0.0f, bus->channel(0)[i]);
+}
+
TEST_F(AudioRendererImplTest, ImmediateEndOfStream) {
Initialize();
{
@@ -678,18 +725,18 @@ TEST_F(AudioRendererImplTest, SetPlaybackRate) {
// Rendering hasn't started. Sink should always be paused.
EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
- renderer_->SetPlaybackRate(0.0f);
+ renderer_->SetPlaybackRate(0.0);
EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
- renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(1.0);
EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
// Rendering has started with non-zero rate. Rate changes will affect sink
// state.
renderer_->StartTicking();
EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
- renderer_->SetPlaybackRate(0.0f);
+ renderer_->SetPlaybackRate(0.0);
EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
- renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(1.0);
EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
// Rendering has stopped. Sink should be paused.
@@ -698,10 +745,10 @@ TEST_F(AudioRendererImplTest, SetPlaybackRate) {
// Start rendering with zero playback rate. Sink should be paused until
// non-zero rate is set.
- renderer_->SetPlaybackRate(0.0f);
+ renderer_->SetPlaybackRate(0.0);
renderer_->StartTicking();
EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
- renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(1.0);
EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
}
diff --git a/chromium/media/renderers/default_renderer_factory.cc b/chromium/media/renderers/default_renderer_factory.cc
new file mode 100644
index 00000000000..a2a91d5b164
--- /dev/null
+++ b/chromium/media/renderers/default_renderer_factory.cc
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/default_renderer_factory.h"
+
+#include "base/bind.h"
+#include "base/single_thread_task_runner.h"
+#if !defined(MEDIA_DISABLE_FFMPEG)
+#include "media/base/media_log.h"
+#include "media/filters/ffmpeg_audio_decoder.h"
+#include "media/filters/ffmpeg_video_decoder.h"
+#endif
+#include "media/filters/gpu_video_decoder.h"
+#include "media/filters/opus_audio_decoder.h"
+#include "media/renderers/audio_renderer_impl.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
+#include "media/renderers/renderer_impl.h"
+#include "media/renderers/video_renderer_impl.h"
+#if !defined(MEDIA_DISABLE_LIBVPX)
+#include "media/filters/vpx_video_decoder.h"
+#endif
+
+namespace media {
+
+DefaultRendererFactory::DefaultRendererFactory(
+ const scoped_refptr<MediaLog>& media_log,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
+ const AudioHardwareConfig& audio_hardware_config)
+ : media_log_(media_log),
+ gpu_factories_(gpu_factories),
+ audio_hardware_config_(audio_hardware_config) {
+}
+
+DefaultRendererFactory::~DefaultRendererFactory() {
+}
+
+// TODO(xhwang): Use RendererConfig to customize what decoders we use.
+scoped_ptr<Renderer> DefaultRendererFactory::CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink) {
+ DCHECK(audio_renderer_sink);
+
+ // Create our audio decoders and renderer.
+ ScopedVector<AudioDecoder> audio_decoders;
+
+#if !defined(MEDIA_DISABLE_FFMPEG)
+ audio_decoders.push_back(new FFmpegAudioDecoder(
+ media_task_runner, base::Bind(&MediaLog::AddLogEvent, media_log_)));
+#endif
+
+ audio_decoders.push_back(new OpusAudioDecoder(media_task_runner));
+
+ scoped_ptr<AudioRenderer> audio_renderer(new AudioRendererImpl(
+ media_task_runner, audio_renderer_sink, audio_decoders.Pass(),
+ audio_hardware_config_, media_log_));
+
+ // Create our video decoders and renderer.
+ ScopedVector<VideoDecoder> video_decoders;
+
+ // |gpu_factories_| requires that its entry points be called on its
+ // |GetTaskRunner()|. Since |pipeline_| will own decoders created from the
+ // factories, require that their message loops are identical.
+ DCHECK(!gpu_factories_.get() ||
+ (gpu_factories_->GetTaskRunner() == media_task_runner.get()));
+
+ if (gpu_factories_.get())
+ video_decoders.push_back(new GpuVideoDecoder(gpu_factories_));
+
+#if !defined(MEDIA_DISABLE_LIBVPX)
+ video_decoders.push_back(new VpxVideoDecoder(media_task_runner));
+#endif
+
+#if !defined(MEDIA_DISABLE_FFMPEG)
+ video_decoders.push_back(new FFmpegVideoDecoder(media_task_runner));
+#endif
+
+ scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
+ media_task_runner, video_renderer_sink, video_decoders.Pass(), true,
+ gpu_factories_, media_log_));
+
+ // Create renderer.
+ return scoped_ptr<Renderer>(new RendererImpl(
+ media_task_runner, audio_renderer.Pass(), video_renderer.Pass()));
+}
+
+} // namespace media
diff --git a/chromium/media/renderers/default_renderer_factory.h b/chromium/media/renderers/default_renderer_factory.h
new file mode 100644
index 00000000000..05cf2b1e1e5
--- /dev/null
+++ b/chromium/media/renderers/default_renderer_factory.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_RENDERERS_DEFAULT_RENDERER_FACTORY_H_
+#define MEDIA_RENDERERS_DEFAULT_RENDERER_FACTORY_H_
+
+#include "base/callback.h"
+#include "media/base/media_export.h"
+#include "media/base/renderer_factory.h"
+
+namespace media {
+
+class AudioHardwareConfig;
+class AudioRendererSink;
+class GpuVideoAcceleratorFactories;
+class MediaLog;
+class VideoRendererSink;
+
+// The default factory class for creating RendererImpl.
+class MEDIA_EXPORT DefaultRendererFactory : public RendererFactory {
+ public:
+ DefaultRendererFactory(
+ const scoped_refptr<MediaLog>& media_log,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
+ const AudioHardwareConfig& audio_hardware_config);
+ ~DefaultRendererFactory() final;
+
+ scoped_ptr<Renderer> CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink) final;
+
+ private:
+ scoped_refptr<MediaLog> media_log_;
+
+ // Factories for supporting video accelerators. May be null.
+ scoped_refptr<GpuVideoAcceleratorFactories> gpu_factories_;
+
+ const AudioHardwareConfig& audio_hardware_config_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultRendererFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_RENDERERS_DEFAULT_RENDERER_FACTORY_H_
diff --git a/chromium/media/filters/gpu_video_accelerator_factories.h b/chromium/media/renderers/gpu_video_accelerator_factories.h
index 6ed04c74f03..9782ed1882f 100644
--- a/chromium/media/filters/gpu_video_accelerator_factories.h
+++ b/chromium/media/renderers/gpu_video_accelerator_factories.h
@@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
-#define MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#ifndef MEDIA_RENDERERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#define MEDIA_RENDERERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#include <vector>
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "media/base/media_export.h"
+#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
-
-class SkBitmap;
+#include "ui/gfx/gpu_memory_buffer.h"
namespace base {
class SingleThreadTaskRunner;
@@ -60,28 +61,34 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories
virtual void WaitSyncPoint(uint32 sync_point) = 0;
- // Read pixels within |visible_rect| boundaries from a native texture and
- // store into |pixels| as RGBA.
- virtual void ReadPixels(uint32 texture_id,
- const gfx::Rect& visible_rect,
- const SkBitmap& pixels) = 0;
+ virtual scoped_ptr<gfx::GpuMemoryBuffer> AllocateGpuMemoryBuffer(
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ gfx::GpuMemoryBuffer::Usage usage) = 0;
+
+ virtual bool IsTextureRGSupported() = 0;
- // Allocate & return a shared memory segment. Caller is responsible for
- // Close()ing the returned pointer.
- virtual base::SharedMemory* CreateSharedMemory(size_t size) = 0;
+ virtual gpu::gles2::GLES2Interface* GetGLES2Interface() = 0;
+
+ // Allocate & return a shared memory segment.
+ virtual scoped_ptr<base::SharedMemory> CreateSharedMemory(size_t size) = 0;
// Returns the task runner the video accelerator runs on.
virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
+ // Returns the supported codec profiles of video decode accelerator.
+ virtual VideoDecodeAccelerator::SupportedProfiles
+ GetVideoDecodeAcceleratorSupportedProfiles() = 0;
+
// Returns the supported codec profiles of video encode accelerator.
- virtual std::vector<VideoEncodeAccelerator::SupportedProfile>
+ virtual VideoEncodeAccelerator::SupportedProfiles
GetVideoEncodeAcceleratorSupportedProfiles() = 0;
protected:
friend class base::RefCountedThreadSafe<GpuVideoAcceleratorFactories>;
- virtual ~GpuVideoAcceleratorFactories();
+ virtual ~GpuVideoAcceleratorFactories() {}
};
} // namespace media
-#endif // MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#endif // MEDIA_RENDERERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
diff --git a/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc b/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc
new file mode 100644
index 00000000000..8dda0b2420c
--- /dev/null
+++ b/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc
@@ -0,0 +1,74 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/mock_gpu_video_accelerator_factories.h"
+
+#include "ui/gfx/gpu_memory_buffer.h"
+
+namespace media {
+
+namespace {
+
+class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
+ public:
+ GpuMemoryBufferImpl(const gfx::Size& size) : size_(size) {
+ bytes_.resize(size_.GetArea());
+ }
+
+ // Overridden from gfx::GpuMemoryBuffer:
+ bool Map(void** data) override {
+ data[0] = &bytes_[0];
+ return true;
+ }
+ void Unmap() override{};
+ bool IsMapped() const override {
+ NOTREACHED();
+ return false;
+ }
+ Format GetFormat() const override { return gfx::GpuMemoryBuffer::R_8; }
+ void GetStride(int* stride) const override { stride[0] = size_.width(); }
+ gfx::GpuMemoryBufferHandle GetHandle() const override {
+ NOTREACHED();
+ return gfx::GpuMemoryBufferHandle();
+ }
+ ClientBuffer AsClientBuffer() override {
+ return reinterpret_cast<ClientBuffer>(this);
+ }
+
+ private:
+ std::vector<unsigned char> bytes_;
+ const gfx::Size size_;
+};
+
+} // unnamed namespace
+
+MockGpuVideoAcceleratorFactories::MockGpuVideoAcceleratorFactories() {}
+
+MockGpuVideoAcceleratorFactories::~MockGpuVideoAcceleratorFactories() {}
+
+scoped_ptr<gfx::GpuMemoryBuffer>
+MockGpuVideoAcceleratorFactories::AllocateGpuMemoryBuffer(
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ gfx::GpuMemoryBuffer::Usage usage) {
+ DCHECK_EQ(gfx::GpuMemoryBuffer::R_8, format);
+ return make_scoped_ptr<gfx::GpuMemoryBuffer>(new GpuMemoryBufferImpl(size));
+}
+
+scoped_ptr<base::SharedMemory>
+MockGpuVideoAcceleratorFactories::CreateSharedMemory(size_t size) {
+ return nullptr;
+}
+
+scoped_ptr<VideoDecodeAccelerator>
+MockGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator() {
+ return scoped_ptr<VideoDecodeAccelerator>(DoCreateVideoDecodeAccelerator());
+}
+
+scoped_ptr<VideoEncodeAccelerator>
+MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator() {
+ return scoped_ptr<VideoEncodeAccelerator>(DoCreateVideoEncodeAccelerator());
+}
+
+} // namespace media
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.h b/chromium/media/renderers/mock_gpu_video_accelerator_factories.h
index 949f35d79a7..f83030c52bc 100644
--- a/chromium/media/filters/mock_gpu_video_accelerator_factories.h
+++ b/chromium/media/renderers/mock_gpu_video_accelerator_factories.h
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
-#define MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#ifndef MEDIA_RENDERERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#define MEDIA_RENDERERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#include "base/memory/scoped_ptr.h"
#include "base/single_thread_task_runner.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "third_party/skia/include/core/SkBitmap.h"
template <class T>
class scoped_refptr;
@@ -39,27 +38,32 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
uint32 texture_target));
MOCK_METHOD1(DeleteTexture, void(uint32 texture_id));
MOCK_METHOD1(WaitSyncPoint, void(uint32 sync_point));
- MOCK_METHOD3(ReadPixels,
- void(uint32 texture_id,
- const gfx::Rect& visible_rect,
- const SkBitmap& pixels));
- MOCK_METHOD1(CreateSharedMemory, base::SharedMemory*(size_t size));
MOCK_METHOD0(GetTaskRunner, scoped_refptr<base::SingleThreadTaskRunner>());
+ MOCK_METHOD0(GetVideoDecodeAcceleratorSupportedProfiles,
+ VideoDecodeAccelerator::SupportedProfiles());
MOCK_METHOD0(GetVideoEncodeAcceleratorSupportedProfiles,
- std::vector<VideoEncodeAccelerator::SupportedProfile>());
+ VideoEncodeAccelerator::SupportedProfiles());
- virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator()
- override;
+ scoped_ptr<gfx::GpuMemoryBuffer> AllocateGpuMemoryBuffer(
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ gfx::GpuMemoryBuffer::Usage usage) override;
- virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator()
- override;
+ MOCK_METHOD0(IsTextureRGSupported, bool());
+ MOCK_METHOD0(GetGLES2Interface, gpu::gles2::GLES2Interface*());
+
+ scoped_ptr<base::SharedMemory> CreateSharedMemory(size_t size) override;
+
+ scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator() override;
+
+ scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator() override;
private:
- virtual ~MockGpuVideoAcceleratorFactories();
+ ~MockGpuVideoAcceleratorFactories() override;
DISALLOW_COPY_AND_ASSIGN(MockGpuVideoAcceleratorFactories);
};
} // namespace media
-#endif // MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#endif // MEDIA_RENDERERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
diff --git a/chromium/media/filters/renderer_impl.cc b/chromium/media/renderers/renderer_impl.cc
index 6416a3dd577..8efe432a7cc 100644
--- a/chromium/media/filters/renderer_impl.cc
+++ b/chromium/media/renderers/renderer_impl.cc
@@ -2,22 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/renderer_impl.h"
+#include "media/renderers/renderer_impl.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
+#include "base/strings/string_number_conversions.h"
#include "media/base/audio_renderer.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream_provider.h"
+#include "media/base/media_switches.h"
#include "media/base/time_source.h"
#include "media/base/video_renderer.h"
#include "media/base/wall_clock_time_source.h"
namespace media {
+// See |video_underflow_threshold_|.
+static const int kDefaultVideoUnderflowThresholdMs = 3000;
+
RendererImpl::RendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
scoped_ptr<AudioRenderer> audio_renderer,
@@ -28,15 +35,30 @@ RendererImpl::RendererImpl(
video_renderer_(video_renderer.Pass()),
time_source_(NULL),
time_ticking_(false),
+ playback_rate_(0.0),
audio_buffering_state_(BUFFERING_HAVE_NOTHING),
video_buffering_state_(BUFFERING_HAVE_NOTHING),
audio_ended_(false),
video_ended_(false),
+ cdm_context_(nullptr),
underflow_disabled_for_testing_(false),
clockless_video_playback_enabled_for_testing_(false),
- weak_factory_(this),
- weak_this_(weak_factory_.GetWeakPtr()) {
+ video_underflow_threshold_(
+ base::TimeDelta::FromMilliseconds(kDefaultVideoUnderflowThresholdMs)),
+ weak_factory_(this) {
+ weak_this_ = weak_factory_.GetWeakPtr();
DVLOG(1) << __FUNCTION__;
+
+ // TODO(dalecurtis): Remove once experiments for http://crbug.com/470940 are
+ // complete.
+ int threshold_ms = 0;
+ std::string threshold_ms_str(
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kVideoUnderflowThresholdMs));
+ if (base::StringToInt(threshold_ms_str, &threshold_ms) && threshold_ms > 0) {
+ video_underflow_threshold_ =
+ base::TimeDelta::FromMilliseconds(threshold_ms);
+ }
}
RendererImpl::~RendererImpl() {
@@ -48,43 +70,76 @@ RendererImpl::~RendererImpl() {
video_renderer_.reset();
audio_renderer_.reset();
- FireAllPendingCallbacks();
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
+ else if (!flush_cb_.is_null())
+ base::ResetAndReturn(&flush_cb_).Run();
}
-void RendererImpl::Initialize(DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
- const StatisticsCB& statistics_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb) {
+void RendererImpl::Initialize(
+ DemuxerStreamProvider* demuxer_stream_provider,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
+ DCHECK_EQ(state_, STATE_UNINITIALIZED);
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!error_cb.is_null());
- DCHECK(!buffering_state_cb.is_null());
DCHECK(demuxer_stream_provider->GetStream(DemuxerStream::AUDIO) ||
demuxer_stream_provider->GetStream(DemuxerStream::VIDEO));
demuxer_stream_provider_ = demuxer_stream_provider;
statistics_cb_ = statistics_cb;
+ buffering_state_cb_ = buffering_state_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
- buffering_state_cb_ = buffering_state_cb;
-
init_cb_ = init_cb;
+ waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
+
state_ = STATE_INITIALIZING;
InitializeAudioRenderer();
}
+void RendererImpl::SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(cdm_context);
+
+ if (cdm_context_) {
+ DVLOG(1) << "Switching CDM not supported.";
+ cdm_attached_cb.Run(false);
+ return;
+ }
+
+ cdm_context_ = cdm_context;
+
+ if (decryptor_ready_cb_.is_null()) {
+ cdm_attached_cb.Run(true);
+ return;
+ }
+
+ base::ResetAndReturn(&decryptor_ready_cb_)
+ .Run(cdm_context->GetDecryptor(), cdm_attached_cb);
+}
+
void RendererImpl::Flush(const base::Closure& flush_cb) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_PLAYING) << state_;
DCHECK(flush_cb_.is_null());
+ if (state_ != STATE_PLAYING) {
+ DCHECK_EQ(state_, STATE_ERROR);
+ return;
+ }
+
flush_cb_ = flush_cb;
state_ = STATE_FLUSHING;
@@ -97,7 +152,11 @@ void RendererImpl::Flush(const base::Closure& flush_cb) {
void RendererImpl::StartPlayingFrom(base::TimeDelta time) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_PLAYING) << state_;
+
+ if (state_ != STATE_PLAYING) {
+ DCHECK_EQ(state_, STATE_ERROR);
+ return;
+ }
time_source_->SetMediaTime(time);
@@ -107,7 +166,7 @@ void RendererImpl::StartPlayingFrom(base::TimeDelta time) {
video_renderer_->StartPlayingFrom(time);
}
-void RendererImpl::SetPlaybackRate(float playback_rate) {
+void RendererImpl::SetPlaybackRate(double playback_rate) {
DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -116,6 +175,16 @@ void RendererImpl::SetPlaybackRate(float playback_rate) {
return;
time_source_->SetPlaybackRate(playback_rate);
+
+ const double old_rate = playback_rate_;
+ playback_rate_ = playback_rate;
+ if (!time_ticking_ || !video_renderer_)
+ return;
+
+ if (old_rate == 0 && playback_rate > 0)
+ video_renderer_->OnTimeStateChanged(true);
+ else if (old_rate > 0 && playback_rate == 0)
+ video_renderer_->OnTimeStateChanged(false);
}
void RendererImpl::SetVolume(float volume) {
@@ -142,14 +211,6 @@ bool RendererImpl::HasVideo() {
return video_renderer_ != NULL;
}
-void RendererImpl::SetCdm(MediaKeys* cdm) {
- DVLOG(1) << __FUNCTION__;
- DCHECK(task_runner_->BelongsToCurrentThread());
- // TODO(xhwang): Explore to possibility to move CDM setting from
- // WebMediaPlayerImpl to this class. See http://crbug.com/401264
- NOTREACHED();
-}
-
void RendererImpl::DisableUnderflowForTesting() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -168,22 +229,50 @@ void RendererImpl::EnableClocklessVideoPlaybackForTesting() {
clockless_video_playback_enabled_for_testing_ = true;
}
-base::TimeDelta RendererImpl::GetMediaTimeForSyncingVideo() {
+bool RendererImpl::GetWallClockTimes(
+ const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times) {
// No BelongsToCurrentThread() checking because this can be called from other
// threads.
//
// TODO(scherkus): Currently called from VideoRendererImpl's internal thread,
// which should go away at some point http://crbug.com/110814
- if (clockless_video_playback_enabled_for_testing_)
- return base::TimeDelta::Max();
+ if (clockless_video_playback_enabled_for_testing_) {
+ *wall_clock_times = std::vector<base::TimeTicks>(media_timestamps.size(),
+ base::TimeTicks::Now());
+ return true;
+ }
+
+ return time_source_->GetWallClockTimes(media_timestamps, wall_clock_times);
+}
+
+void RendererImpl::SetDecryptorReadyCallback(
+ const DecryptorReadyCB& decryptor_ready_cb) {
+ // Cancels the previous decryptor request.
+ if (decryptor_ready_cb.is_null()) {
+ if (!decryptor_ready_cb_.is_null()) {
+ base::ResetAndReturn(&decryptor_ready_cb_)
+ .Run(nullptr, base::Bind(IgnoreCdmAttached));
+ }
+ return;
+ }
+
+ // We initialize audio and video decoders in sequence.
+ DCHECK(decryptor_ready_cb_.is_null());
+
+ if (cdm_context_) {
+ decryptor_ready_cb.Run(cdm_context_->GetDecryptor(),
+ base::Bind(IgnoreCdmAttached));
+ return;
+ }
- return time_source_->CurrentMediaTimeForSyncingVideo();
+ decryptor_ready_cb_ = decryptor_ready_cb;
}
void RendererImpl::InitializeAudioRenderer() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK_EQ(state_, STATE_INITIALIZING);
DCHECK(!init_cb_.is_null());
PipelineStatusCB done_cb =
@@ -195,35 +284,44 @@ void RendererImpl::InitializeAudioRenderer() {
return;
}
+ // Note: After the initialization of a renderer, error events from it may
+ // happen at any time and all future calls must guard against STATE_ERROR.
audio_renderer_->Initialize(
- demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO),
- done_cb,
+ demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO), done_cb,
+ base::Bind(&RendererImpl::SetDecryptorReadyCallback, weak_this_),
base::Bind(&RendererImpl::OnUpdateStatistics, weak_this_),
base::Bind(&RendererImpl::OnBufferingStateChanged, weak_this_,
&audio_buffering_state_),
base::Bind(&RendererImpl::OnAudioRendererEnded, weak_this_),
- base::Bind(&RendererImpl::OnError, weak_this_));
+ base::Bind(&RendererImpl::OnError, weak_this_),
+ waiting_for_decryption_key_cb_);
}
void RendererImpl::OnAudioRendererInitializeDone(PipelineStatus status) {
DVLOG(1) << __FUNCTION__ << ": " << status;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
- DCHECK(!init_cb_.is_null());
- if (status != PIPELINE_OK) {
+ // OnError() may be fired at any time by the renderers, even if they thought
+ // they initialized successfully (due to delayed output device setup).
+ if (state_ != STATE_INITIALIZING) {
+ DCHECK(init_cb_.is_null());
audio_renderer_.reset();
- OnError(status);
return;
}
+ if (status != PIPELINE_OK) {
+ base::ResetAndReturn(&init_cb_).Run(status);
+ return;
+ }
+
+ DCHECK(!init_cb_.is_null());
InitializeVideoRenderer();
}
void RendererImpl::InitializeVideoRenderer() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK_EQ(state_, STATE_INITIALIZING);
DCHECK(!init_cb_.is_null());
PipelineStatusCB done_cb =
@@ -236,36 +334,40 @@ void RendererImpl::InitializeVideoRenderer() {
}
video_renderer_->Initialize(
- demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO),
- demuxer_stream_provider_->GetLiveness() ==
- DemuxerStreamProvider::LIVENESS_LIVE,
- done_cb,
+ demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO), done_cb,
+ base::Bind(&RendererImpl::SetDecryptorReadyCallback, weak_this_),
base::Bind(&RendererImpl::OnUpdateStatistics, weak_this_),
- base::Bind(&RendererImpl::OnBufferingStateChanged,
- weak_this_,
+ base::Bind(&RendererImpl::OnBufferingStateChanged, weak_this_,
&video_buffering_state_),
base::Bind(&RendererImpl::OnVideoRendererEnded, weak_this_),
base::Bind(&RendererImpl::OnError, weak_this_),
- base::Bind(&RendererImpl::GetMediaTimeForSyncingVideo,
- base::Unretained(this)));
+ base::Bind(&RendererImpl::GetWallClockTimes, base::Unretained(this)),
+ waiting_for_decryption_key_cb_);
}
void RendererImpl::OnVideoRendererInitializeDone(PipelineStatus status) {
DVLOG(1) << __FUNCTION__ << ": " << status;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
- DCHECK(!init_cb_.is_null());
- if (status != PIPELINE_OK) {
+ // OnError() may be fired at any time by the renderers, even if they thought
+ // they initialized successfully (due to delayed output device setup).
+ if (state_ != STATE_INITIALIZING) {
+ DCHECK(init_cb_.is_null());
audio_renderer_.reset();
video_renderer_.reset();
- OnError(status);
+ return;
+ }
+
+ DCHECK(!init_cb_.is_null());
+
+ if (status != PIPELINE_OK) {
+ base::ResetAndReturn(&init_cb_).Run(status);
return;
}
if (audio_renderer_) {
time_source_ = audio_renderer_->GetTimeSource();
- } else {
+ } else if (!time_source_) {
wall_clock_time_source_.reset(new WallClockTimeSource());
time_source_ = wall_clock_time_source_.get();
}
@@ -273,13 +375,13 @@ void RendererImpl::OnVideoRendererInitializeDone(PipelineStatus status) {
state_ = STATE_PLAYING;
DCHECK(time_source_);
DCHECK(audio_renderer_ || video_renderer_);
- base::ResetAndReturn(&init_cb_).Run();
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
void RendererImpl::FlushAudioRenderer() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK_EQ(state_, STATE_FLUSHING);
DCHECK(!flush_cb_.is_null());
if (!audio_renderer_) {
@@ -300,9 +402,13 @@ void RendererImpl::OnAudioRendererFlushDone() {
return;
}
- DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK_EQ(state_, STATE_FLUSHING);
DCHECK(!flush_cb_.is_null());
+ // If we had a deferred video renderer underflow prior to the flush, it should
+ // have been cleared by the audio renderer changing to BUFFERING_HAVE_NOTHING.
+ DCHECK(deferred_underflow_cb_.IsCancelled());
+
DCHECK_EQ(audio_buffering_state_, BUFFERING_HAVE_NOTHING);
audio_ended_ = false;
FlushVideoRenderer();
@@ -311,7 +417,7 @@ void RendererImpl::OnAudioRendererFlushDone() {
void RendererImpl::FlushVideoRenderer() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK_EQ(state_, STATE_FLUSHING);
DCHECK(!flush_cb_.is_null());
if (!video_renderer_) {
@@ -332,7 +438,7 @@ void RendererImpl::OnVideoRendererFlushDone() {
return;
}
- DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK_EQ(state_, STATE_FLUSHING);
DCHECK(!flush_cb_.is_null());
DCHECK_EQ(video_buffering_state_, BUFFERING_HAVE_NOTHING);
@@ -348,12 +454,39 @@ void RendererImpl::OnUpdateStatistics(const PipelineStatistics& stats) {
void RendererImpl::OnBufferingStateChanged(BufferingState* buffering_state,
BufferingState new_buffering_state) {
+ const bool is_audio = buffering_state == &audio_buffering_state_;
DVLOG(1) << __FUNCTION__ << "(" << *buffering_state << ", "
- << new_buffering_state << ") "
- << (buffering_state == &audio_buffering_state_ ? "audio" : "video");
+ << new_buffering_state << ") " << (is_audio ? "audio" : "video");
DCHECK(task_runner_->BelongsToCurrentThread());
+
bool was_waiting_for_enough_data = WaitingForEnoughData();
+ // When audio is present and has enough data, defer video underflow callbacks
+ // for some time to avoid unnecessary glitches in audio; see
+ // http://crbug.com/144683#c53.
+ if (audio_renderer_ && !is_audio && state_ == STATE_PLAYING) {
+ if (video_buffering_state_ == BUFFERING_HAVE_ENOUGH &&
+ audio_buffering_state_ == BUFFERING_HAVE_ENOUGH &&
+ new_buffering_state == BUFFERING_HAVE_NOTHING &&
+ deferred_underflow_cb_.IsCancelled()) {
+ deferred_underflow_cb_.Reset(base::Bind(
+ &RendererImpl::OnBufferingStateChanged, weak_factory_.GetWeakPtr(),
+ buffering_state, new_buffering_state));
+ task_runner_->PostDelayedTask(FROM_HERE,
+ deferred_underflow_cb_.callback(),
+ video_underflow_threshold_);
+ return;
+ }
+
+ deferred_underflow_cb_.Cancel();
+ } else if (!deferred_underflow_cb_.IsCancelled() && is_audio &&
+ new_buffering_state == BUFFERING_HAVE_NOTHING) {
+ // If audio underflows while we have a deferred video underflow in progress
+ // we want to mark video as underflowed immediately and cancel the deferral.
+ deferred_underflow_cb_.Cancel();
+ video_buffering_state_ = BUFFERING_HAVE_NOTHING;
+ }
+
*buffering_state = new_buffering_state;
// Disable underflow by ignoring updates that renderers have ran out of data.
@@ -407,13 +540,18 @@ void RendererImpl::PausePlayback() {
case STATE_UNINITIALIZED:
case STATE_INITIALIZING:
- case STATE_ERROR:
NOTREACHED() << "Invalid state: " << state_;
break;
+
+ case STATE_ERROR:
+ // An error state may occur at any time.
+ break;
}
time_ticking_ = false;
time_source_->StopTicking();
+ if (playback_rate_ > 0 && video_renderer_)
+ video_renderer_->OnTimeStateChanged(false);
}
void RendererImpl::StartPlayback() {
@@ -425,6 +563,8 @@ void RendererImpl::StartPlayback() {
time_ticking_ = true;
time_source_->StartTicking();
+ if (playback_rate_ > 0 && video_renderer_)
+ video_renderer_->OnTimeStateChanged(true);
}
void RendererImpl::OnAudioRendererEnded() {
@@ -484,19 +624,20 @@ void RendererImpl::OnError(PipelineStatus error) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
- state_ = STATE_ERROR;
-
- // Pipeline will destroy |this| as the result of error.
- base::ResetAndReturn(&error_cb_).Run(error);
+ // An error has already been delivered.
+ if (state_ == STATE_ERROR)
+ return;
- FireAllPendingCallbacks();
-}
+ const State old_state = state_;
+ state_ = STATE_ERROR;
-void RendererImpl::FireAllPendingCallbacks() {
- DCHECK(task_runner_->BelongsToCurrentThread());
+ if (old_state == STATE_INITIALIZING) {
+ base::ResetAndReturn(&init_cb_).Run(error);
+ return;
+ }
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run();
+ // After OnError() returns, the pipeline may destroy |this|.
+ base::ResetAndReturn(&error_cb_).Run(error);
if (!flush_cb_.is_null())
base::ResetAndReturn(&flush_cb_).Run();
diff --git a/chromium/media/filters/renderer_impl.h b/chromium/media/renderers/renderer_impl.h
index 67902d09326..56887c2a53c 100644
--- a/chromium/media/filters/renderer_impl.h
+++ b/chromium/media/renderers/renderer_impl.h
@@ -2,9 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_RENDERER_IMPL_H_
-#define MEDIA_FILTERS_RENDERER_IMPL_H_
+#ifndef MEDIA_RENDERERS_RENDERER_IMPL_H_
+#define MEDIA_RENDERERS_RENDERER_IMPL_H_
+#include <vector>
+
+#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
@@ -13,6 +16,7 @@
#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
#include "media/base/buffering_state.h"
+#include "media/base/decryptor.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
#include "media/base/renderer.h"
@@ -39,27 +43,35 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
scoped_ptr<AudioRenderer> audio_renderer,
scoped_ptr<VideoRenderer> video_renderer);
- ~RendererImpl() override;
+ ~RendererImpl() final;
// Renderer implementation.
void Initialize(DemuxerStreamProvider* demuxer_stream_provider,
- const base::Closure& init_cb,
+ const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const BufferingStateCB& buffering_state_cb) override;
- void Flush(const base::Closure& flush_cb) override;
- void StartPlayingFrom(base::TimeDelta time) override;
- void SetPlaybackRate(float playback_rate) override;
- void SetVolume(float volume) override;
- base::TimeDelta GetMediaTime() override;
- bool HasAudio() override;
- bool HasVideo() override;
- void SetCdm(MediaKeys* cdm) override;
+ const base::Closure& waiting_for_decryption_key_cb) final;
+ void SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) final;
+ void Flush(const base::Closure& flush_cb) final;
+ void StartPlayingFrom(base::TimeDelta time) final;
+ void SetPlaybackRate(double playback_rate) final;
+ void SetVolume(float volume) final;
+ base::TimeDelta GetMediaTime() final;
+ bool HasAudio() final;
+ bool HasVideo() final;
// Helper functions for testing purposes. Must be called before Initialize().
void DisableUnderflowForTesting();
void EnableClocklessVideoPlaybackForTesting();
+ void set_time_source_for_testing(TimeSource* time_source) {
+ time_source_ = time_source;
+ }
+ void set_video_underflow_threshold_for_testing(base::TimeDelta threshold) {
+ video_underflow_threshold_ = threshold;
+ }
private:
enum State {
@@ -70,7 +82,14 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
STATE_ERROR
};
- base::TimeDelta GetMediaTimeForSyncingVideo();
+ bool GetWallClockTimes(const std::vector<base::TimeDelta>& media_timestamps,
+ std::vector<base::TimeTicks>* wall_clock_times);
+
+ // Requests that this object notifies when a decryptor is ready through the
+ // |decryptor_ready_cb| provided.
+ // If |decryptor_ready_cb| is null, the existing callback will be fired with
+ // nullptr immediately and reset.
+ void SetDecryptorReadyCallback(const DecryptorReadyCB& decryptor_ready_cb);
// Helper functions and callbacks for Initialize().
void InitializeAudioRenderer();
@@ -111,8 +130,6 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
// Callback executed when a runtime error happens.
void OnError(PipelineStatus error);
- void FireAllPendingCallbacks();
-
State state_;
// Task runner used to execute pipeline tasks.
@@ -125,9 +142,10 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
BufferingStateCB buffering_state_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
// Temporary callback used for Initialize() and Flush().
- base::Closure init_cb_;
+ PipelineStatusCB init_cb_;
base::Closure flush_cb_;
scoped_ptr<AudioRenderer> audio_renderer_;
@@ -137,6 +155,7 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
TimeSource* time_source_;
scoped_ptr<WallClockTimeSource> wall_clock_time_source_;
bool time_ticking_;
+ double playback_rate_;
// The time to start playback from after starting/seeking has completed.
base::TimeDelta start_time_;
@@ -148,16 +167,32 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
bool audio_ended_;
bool video_ended_;
+ CdmContext* cdm_context_;
+
+ // Callback registered by filters (decoder or demuxer) to be informed of a
+ // Decryptor.
+ // Note: We could have multiple filters registering this callback. One
+ // callback is okay because:
+ // 1, We always initialize filters in sequence.
+ // 2, Filter initialization will not finish until this callback is satisfied.
+ DecryptorReadyCB decryptor_ready_cb_;
+
bool underflow_disabled_for_testing_;
bool clockless_video_playback_enabled_for_testing_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<RendererImpl> weak_factory_;
+ // Used to defer underflow for video when audio is present.
+ base::CancelableClosure deferred_underflow_cb_;
+
+ // The amount of time to wait before declaring underflow if the video renderer
+ // runs out of data but the audio renderer still has enough.
+ base::TimeDelta video_underflow_threshold_;
+
base::WeakPtr<RendererImpl> weak_this_;
+ base::WeakPtrFactory<RendererImpl> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(RendererImpl);
};
} // namespace media
-#endif // MEDIA_FILTERS_RENDERER_IMPL_H_
+#endif // MEDIA_RENDERERS_RENDERER_IMPL_H_
diff --git a/chromium/media/filters/renderer_impl_unittest.cc b/chromium/media/renderers/renderer_impl_unittest.cc
index 5db62055e94..e4396580aa9 100644
--- a/chromium/media/filters/renderer_impl_unittest.cc
+++ b/chromium/media/renderers/renderer_impl_unittest.cc
@@ -11,7 +11,7 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
-#include "media/filters/renderer_impl.h"
+#include "media/renderers/renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -43,12 +43,13 @@ class RendererImplTest : public ::testing::Test {
CallbackHelper() {}
virtual ~CallbackHelper() {}
- MOCK_METHOD0(OnInitialize, void());
+ MOCK_METHOD1(OnInitialize, void(PipelineStatus));
MOCK_METHOD0(OnFlushed, void());
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnError, void(PipelineStatus));
MOCK_METHOD1(OnUpdateStatistics, void(const PipelineStatistics&));
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void());
private:
DISALLOW_COPY_AND_ASSIGN(CallbackHelper);
@@ -67,8 +68,6 @@ class RendererImplTest : public ::testing::Test {
DemuxerStream* null_pointer = NULL;
EXPECT_CALL(*demuxer_, GetStream(_))
.WillRepeatedly(Return(null_pointer));
- EXPECT_CALL(*demuxer_, GetLiveness())
- .WillRepeatedly(Return(Demuxer::LIVENESS_UNKNOWN));
}
virtual ~RendererImplTest() {
@@ -89,31 +88,29 @@ class RendererImplTest : public ::testing::Test {
// Sets up expectations to allow the audio renderer to initialize.
void SetAudioRendererInitializeExpectations(PipelineStatus status) {
EXPECT_CALL(*audio_renderer_,
- Initialize(audio_stream_.get(), _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<3>(&audio_buffering_state_cb_),
- SaveArg<4>(&audio_ended_cb_),
- SaveArg<5>(&audio_error_cb_),
- RunCallback<1>(status)));
+ Initialize(audio_stream_.get(), _, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<4>(&audio_buffering_state_cb_),
+ SaveArg<5>(&audio_ended_cb_),
+ SaveArg<6>(&audio_error_cb_), RunCallback<1>(status)));
}
// Sets up expectations to allow the video renderer to initialize.
void SetVideoRendererInitializeExpectations(PipelineStatus status) {
EXPECT_CALL(*video_renderer_,
- Initialize(video_stream_.get(), _, _, _, _, _, _, _))
+ Initialize(video_stream_.get(), _, _, _, _, _, _, _, _))
.WillOnce(DoAll(SaveArg<4>(&video_buffering_state_cb_),
- SaveArg<5>(&video_ended_cb_),
- RunCallback<2>(status)));
+ SaveArg<5>(&video_ended_cb_), RunCallback<1>(status)));
}
void InitializeAndExpect(PipelineStatus start_status) {
- if (start_status != PIPELINE_OK)
- EXPECT_CALL(callbacks_, OnError(start_status));
-
- EXPECT_CALL(callbacks_, OnInitialize());
+ EXPECT_CALL(callbacks_, OnInitialize(start_status));
+ EXPECT_CALL(callbacks_, OnWaitingForDecryptionKey()).Times(0);
if (start_status == PIPELINE_OK && audio_stream_) {
EXPECT_CALL(*audio_renderer_, GetTimeSource())
.WillOnce(Return(&time_source_));
+ } else {
+ renderer_impl_->set_time_source_for_testing(&time_source_);
}
renderer_impl_->Initialize(
@@ -122,9 +119,11 @@ class RendererImplTest : public ::testing::Test {
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnUpdateStatistics,
base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnBufferingStateChange,
+ base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingStateChange,
+ base::Bind(&CallbackHelper::OnWaitingForDecryptionKey,
base::Unretained(&callbacks_)));
base::RunLoop().RunUntilIdle();
}
@@ -174,10 +173,10 @@ class RendererImplTest : public ::testing::Test {
base::TimeDelta start_time(
base::TimeDelta::FromMilliseconds(kStartPlayingTimeInMs));
+ EXPECT_CALL(time_source_, SetMediaTime(start_time));
+ EXPECT_CALL(time_source_, StartTicking());
if (audio_stream_) {
- EXPECT_CALL(time_source_, SetMediaTime(start_time));
- EXPECT_CALL(time_source_, StartTicking());
EXPECT_CALL(*audio_renderer_, StartPlaying())
.WillOnce(SetBufferingState(&audio_buffering_state_cb_,
BUFFERING_HAVE_ENOUGH));
@@ -194,9 +193,10 @@ class RendererImplTest : public ::testing::Test {
}
void Flush(bool underflowed) {
+ if (!underflowed)
+ EXPECT_CALL(time_source_, StopTicking());
+
if (audio_stream_) {
- if (!underflowed)
- EXPECT_CALL(time_source_, StopTicking());
EXPECT_CALL(*audio_renderer_, Flush(_))
.WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
BUFFERING_HAVE_NOTHING),
@@ -217,7 +217,7 @@ class RendererImplTest : public ::testing::Test {
base::RunLoop().RunUntilIdle();
}
- void SetPlaybackRate(float playback_rate) {
+ void SetPlaybackRate(double playback_rate) {
EXPECT_CALL(time_source_, SetPlaybackRate(playback_rate));
renderer_impl_->SetPlaybackRate(playback_rate);
base::RunLoop().RunUntilIdle();
@@ -227,7 +227,7 @@ class RendererImplTest : public ::testing::Test {
return renderer_impl_->GetMediaTime().InMilliseconds();
}
- bool IsMediaTimeAdvancing(float playback_rate) {
+ bool IsMediaTimeAdvancing(double playback_rate) {
int64 start_time_ms = GetMediaTimeMs();
const int64 time_to_advance_ms = 100;
@@ -242,7 +242,7 @@ class RendererImplTest : public ::testing::Test {
}
bool IsMediaTimeAdvancing() {
- return IsMediaTimeAdvancing(1.0f);
+ return IsMediaTimeAdvancing(1.0);
}
// Fixture members.
@@ -317,6 +317,41 @@ TEST_F(RendererImplTest, StartPlayingFrom) {
Play();
}
+TEST_F(RendererImplTest, StartPlayingFromWithPlaybackRate) {
+ InitializeWithAudioAndVideo();
+
+ // Play with a zero playback rate shouldn't start time.
+ Play();
+ Mock::VerifyAndClearExpectations(video_renderer_);
+
+ // Positive playback rate when ticking should start time.
+ EXPECT_CALL(*video_renderer_, OnTimeStateChanged(true));
+ SetPlaybackRate(1.0);
+ Mock::VerifyAndClearExpectations(video_renderer_);
+
+ // Double notifications shouldn't be sent.
+ SetPlaybackRate(1.0);
+ Mock::VerifyAndClearExpectations(video_renderer_);
+
+ // Zero playback rate should stop time.
+ EXPECT_CALL(*video_renderer_, OnTimeStateChanged(false));
+ SetPlaybackRate(0.0);
+ Mock::VerifyAndClearExpectations(video_renderer_);
+
+ // Double notifications shouldn't be sent.
+ SetPlaybackRate(0.0);
+ Mock::VerifyAndClearExpectations(video_renderer_);
+
+ // Starting playback and flushing should cause time to stop.
+ EXPECT_CALL(*video_renderer_, OnTimeStateChanged(true));
+ EXPECT_CALL(*video_renderer_, OnTimeStateChanged(false));
+ SetPlaybackRate(1.0);
+ Flush(false);
+
+ // A positive playback rate when playback isn't started should do nothing.
+ SetPlaybackRate(1.0);
+}
+
TEST_F(RendererImplTest, FlushAfterInitialization) {
InitializeWithAudioAndVideo();
Flush(true);
@@ -342,8 +377,8 @@ TEST_F(RendererImplTest, FlushAfterUnderflow) {
TEST_F(RendererImplTest, SetPlaybackRate) {
InitializeWithAudioAndVideo();
- SetPlaybackRate(1.0f);
- SetPlaybackRate(2.0f);
+ SetPlaybackRate(1.0);
+ SetPlaybackRate(2.0);
}
TEST_F(RendererImplTest, SetVolume) {
@@ -367,7 +402,7 @@ TEST_F(RendererImplTest, VideoStreamEnded) {
InitializeWithVideo();
Play();
- // Video ended won't affect |time_source_|.
+ EXPECT_CALL(time_source_, StopTicking());
EXPECT_CALL(callbacks_, OnEnded());
video_ended_cb_.Run();
@@ -431,4 +466,140 @@ TEST_F(RendererImplTest, ErrorAfterFlush) {
base::RunLoop().RunUntilIdle();
}
+TEST_F(RendererImplTest, ErrorDuringInitialize) {
+ CreateAudioAndVideoStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_OK);
+
+ // Force an audio error to occur during video renderer initialization.
+ EXPECT_CALL(*video_renderer_,
+ Initialize(video_stream_.get(), _, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(AudioError(&audio_error_cb_, PIPELINE_ERROR_DECODE),
+ SaveArg<4>(&video_buffering_state_cb_),
+ SaveArg<5>(&video_ended_cb_),
+ RunCallback<1>(PIPELINE_OK)));
+
+ InitializeAndExpect(PIPELINE_ERROR_DECODE);
+}
+
+TEST_F(RendererImplTest, AudioUnderflow) {
+ InitializeWithAudio();
+ Play();
+
+ // Underflow should occur immediately with a single audio track.
+ EXPECT_CALL(time_source_, StopTicking());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+}
+
+TEST_F(RendererImplTest, AudioUnderflowWithVideo) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Underflow should be immediate when both audio and video are present and
+ // audio underflows.
+ EXPECT_CALL(time_source_, StopTicking());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+}
+
+TEST_F(RendererImplTest, VideoUnderflow) {
+ InitializeWithVideo();
+ Play();
+
+ // Underflow should occur immediately with a single video track.
+ EXPECT_CALL(time_source_, StopTicking());
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+}
+
+TEST_F(RendererImplTest, VideoUnderflowWithAudio) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Set a zero threshold such that the underflow will be executed on the next
+ // run of the message loop.
+ renderer_impl_->set_video_underflow_threshold_for_testing(base::TimeDelta());
+
+ // Underflow should be delayed when both audio and video are present and video
+ // underflows.
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ Mock::VerifyAndClearExpectations(&time_source_);
+
+ EXPECT_CALL(time_source_, StopTicking());
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, VideoUnderflowWithAudioVideoRecovers) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Set a zero threshold such that the underflow will be executed on the next
+ // run of the message loop.
+ renderer_impl_->set_video_underflow_threshold_for_testing(base::TimeDelta());
+
+ // Underflow should be delayed when both audio and video are present and video
+ // underflows.
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ Mock::VerifyAndClearExpectations(&time_source_);
+
+ // If video recovers, the underflow should never occur.
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, VideoAndAudioUnderflow) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Set a zero threshold such that the underflow will be executed on the next
+ // run of the message loop.
+ renderer_impl_->set_video_underflow_threshold_for_testing(base::TimeDelta());
+
+ // Underflow should be delayed when both audio and video are present and video
+ // underflows.
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ Mock::VerifyAndClearExpectations(&time_source_);
+
+ EXPECT_CALL(time_source_, StopTicking());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+
+  // Nothing else should be primed on the message loop.
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, VideoUnderflowWithAudioFlush) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Set a massive threshold such that it shouldn't fire within this test.
+ renderer_impl_->set_video_underflow_threshold_for_testing(
+ base::TimeDelta::FromSeconds(100));
+
+ // Simulate the cases where audio underflows and then video underflows.
+ EXPECT_CALL(time_source_, StopTicking());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ video_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ Mock::VerifyAndClearExpectations(&time_source_);
+
+ // Flush the audio and video renderers, both think they're in an underflow
+ // state, but if the video renderer underflow was deferred, RendererImpl would
+ // think it still has enough data.
+ EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Flush(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(callbacks_, OnFlushed());
+ renderer_impl_->Flush(
+ base::Bind(&CallbackHelper::OnFlushed, base::Unretained(&callbacks_)));
+ base::RunLoop().RunUntilIdle();
+
+ // Start playback after the flush, but never return BUFFERING_HAVE_ENOUGH from
+ // the video renderer (which simulates spool up time for the video renderer).
+ const base::TimeDelta kStartTime;
+ EXPECT_CALL(time_source_, SetMediaTime(kStartTime));
+ EXPECT_CALL(*audio_renderer_, StartPlaying())
+ .WillOnce(
+ SetBufferingState(&audio_buffering_state_cb_, BUFFERING_HAVE_ENOUGH));
+ EXPECT_CALL(*video_renderer_, StartPlayingFrom(kStartTime));
+ renderer_impl_->StartPlayingFrom(kStartTime);
+
+  // Nothing else should be primed on the message loop.
+ base::RunLoop().RunUntilIdle();
+}
+
} // namespace media
diff --git a/chromium/media/renderers/video_renderer_impl.cc b/chromium/media/renderers/video_renderer_impl.cc
new file mode 100644
index 00000000000..3477db68800
--- /dev/null
+++ b/chromium/media/renderers/video_renderer_impl.cc
@@ -0,0 +1,756 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/video_renderer_impl.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/location.h"
+#include "base/metrics/field_trial.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_util.h"
+#include "base/time/default_tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/buffers.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+#include "media/base/pipeline.h"
+#include "media/base/video_frame.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
+#include "media/video/gpu_memory_buffer_video_frame_pool.h"
+
+namespace media {
+
+// TODO(dalecurtis): This experiment is temporary and should be removed once we
+// have enough data to support the primacy of the new video rendering path; see
+// http://crbug.com/485699 for details.
+static bool ShouldUseVideoRenderingPath() {
+ // Note: It's important to query the field trial state first, to ensure that
+ // UMA reports the correct group.
+ const std::string group_name =
+ base::FieldTrialList::FindFullName("NewVideoRendererTrial");
+ const bool disabled_via_cli =
+ base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableNewVideoRenderer);
+ return !disabled_via_cli && !StartsWithASCII(group_name, "Disabled", true);
+}
+
+VideoRendererImpl::VideoRendererImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ VideoRendererSink* sink,
+ ScopedVector<VideoDecoder> decoders,
+ bool drop_frames,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
+ const scoped_refptr<MediaLog>& media_log)
+ : task_runner_(task_runner),
+ use_new_video_renderering_path_(ShouldUseVideoRenderingPath()),
+ sink_(sink),
+ sink_started_(false),
+ video_frame_stream_(
+ new VideoFrameStream(task_runner, decoders.Pass(), media_log)),
+ gpu_memory_buffer_pool_(
+ new GpuMemoryBufferVideoFramePool(task_runner, gpu_factories)),
+ low_delay_(false),
+ received_end_of_stream_(false),
+ rendered_end_of_stream_(false),
+ frame_available_(&lock_),
+ state_(kUninitialized),
+ thread_(),
+ pending_read_(false),
+ drop_frames_(drop_frames),
+ buffering_state_(BUFFERING_HAVE_NOTHING),
+ frames_decoded_(0),
+ frames_dropped_(0),
+ is_shutting_down_(false),
+ tick_clock_(new base::DefaultTickClock()),
+ was_background_rendering_(false),
+ time_progressing_(false),
+ render_first_frame_and_stop_(false),
+ posted_maybe_stop_after_first_paint_(false),
+ weak_factory_(this) {
+}
+
+VideoRendererImpl::~VideoRendererImpl() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (!use_new_video_renderering_path_) {
+ base::AutoLock auto_lock(lock_);
+ is_shutting_down_ = true;
+ frame_available_.Signal();
+ }
+
+ if (!thread_.is_null())
+ base::PlatformThread::Join(thread_);
+
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
+
+ if (!flush_cb_.is_null())
+ base::ResetAndReturn(&flush_cb_).Run();
+
+ if (use_new_video_renderering_path_ && sink_started_)
+ StopSink();
+}
+
+void VideoRendererImpl::Flush(const base::Closure& callback) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (use_new_video_renderering_path_ && sink_started_)
+ StopSink();
+
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kPlaying);
+ flush_cb_ = callback;
+ state_ = kFlushing;
+
+ // This is necessary if the |video_frame_stream_| has already seen an end of
+ // stream and needs to drain it before flushing it.
+ ready_frames_.clear();
+ if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
+ buffering_state_ = BUFFERING_HAVE_NOTHING;
+ buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ }
+ received_end_of_stream_ = false;
+ rendered_end_of_stream_ = false;
+
+ if (use_new_video_renderering_path_)
+ algorithm_->Reset();
+
+ video_frame_stream_->Reset(
+ base::Bind(&VideoRendererImpl::OnVideoFrameStreamResetDone,
+ weak_factory_.GetWeakPtr()));
+}
+
+void VideoRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InMicroseconds() << ")";
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kFlushed);
+ DCHECK(!pending_read_);
+ DCHECK(ready_frames_.empty());
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+
+ state_ = kPlaying;
+ start_timestamp_ = timestamp;
+ AttemptRead_Locked();
+}
+
+void VideoRendererImpl::Initialize(
+ DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const StatisticsCB& statistics_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb,
+ const base::Closure& waiting_for_decryption_key_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(lock_);
+ DCHECK(stream);
+ DCHECK_EQ(stream->type(), DemuxerStream::VIDEO);
+ DCHECK(!init_cb.is_null());
+ DCHECK(!statistics_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
+ DCHECK(!ended_cb.is_null());
+ DCHECK(!wall_clock_time_cb.is_null());
+ DCHECK_EQ(kUninitialized, state_);
+ DCHECK(!render_first_frame_and_stop_);
+ DCHECK(!posted_maybe_stop_after_first_paint_);
+ DCHECK(!was_background_rendering_);
+ DCHECK(!time_progressing_);
+
+ low_delay_ = (stream->liveness() == DemuxerStream::LIVENESS_LIVE);
+
+ // Always post |init_cb_| because |this| could be destroyed if initialization
+ // failed.
+ init_cb_ = BindToCurrentLoop(init_cb);
+
+ // Always post |buffering_state_cb_| because it may otherwise invoke reentrant
+ // calls to OnTimeStateChanged() under lock, which can deadlock the compositor
+ // and media threads.
+ buffering_state_cb_ = BindToCurrentLoop(buffering_state_cb);
+
+ statistics_cb_ = statistics_cb;
+ paint_cb_ = base::Bind(&VideoRendererSink::PaintFrameUsingOldRenderingPath,
+ base::Unretained(sink_));
+ ended_cb_ = ended_cb;
+ error_cb_ = error_cb;
+ wall_clock_time_cb_ = wall_clock_time_cb;
+ state_ = kInitializing;
+
+ video_frame_stream_->Initialize(
+ stream, base::Bind(&VideoRendererImpl::OnVideoFrameStreamInitialized,
+ weak_factory_.GetWeakPtr()),
+ set_decryptor_ready_cb, statistics_cb, waiting_for_decryption_key_cb);
+}
+
+scoped_refptr<VideoFrame> VideoRendererImpl::Render(
+ base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ bool background_rendering) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(use_new_video_renderering_path_);
+ DCHECK_EQ(state_, kPlaying);
+
+ size_t frames_dropped = 0;
+ scoped_refptr<VideoFrame> result =
+ algorithm_->Render(deadline_min, deadline_max, &frames_dropped);
+
+ // Due to how the |algorithm_| holds frames, this should never be null if
+ // we've had a proper startup sequence.
+ DCHECK(result);
+
+ // Declare HAVE_NOTHING if we reach a state where we can't progress playback
+ // any further. We don't want to do this if we've already done so, reached
+ // end of stream, or have frames available. We also don't want to do this in
+ // background rendering mode unless this isn't the first background render
+ // tick and we haven't seen any decoded frames since the last one.
+ const size_t effective_frames = MaybeFireEndedCallback();
+ if (buffering_state_ == BUFFERING_HAVE_ENOUGH && !received_end_of_stream_ &&
+ !effective_frames && (!background_rendering ||
+ (!frames_decoded_ && was_background_rendering_))) {
+ // Do not set |buffering_state_| here as the lock in FrameReady() may be
+    // held already and it may fire the state changes in the wrong order.
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VideoRendererImpl::TransitionToHaveNothing,
+ weak_factory_.GetWeakPtr()));
+ }
+
+ // We don't count dropped frames in the background to avoid skewing the count
+ // and impacting JavaScript visible metrics used by web developers.
+ //
+ // Just after resuming from background rendering, we also don't count the
+ // dropped frames since they are likely just dropped due to being too old.
+ if (!background_rendering && !was_background_rendering_)
+ frames_dropped_ += frames_dropped;
+ UpdateStatsAndWait_Locked(base::TimeDelta());
+ was_background_rendering_ = background_rendering;
+
+ // After painting the first frame, if playback hasn't started, we post a
+ // delayed task to request that the sink be stopped. The task is delayed to
+ // give videos with autoplay time to start.
+ //
+ // OnTimeStateChanged() will clear this flag if time starts before we get here
+ // and MaybeStopSinkAfterFirstPaint() will ignore this request if time starts
+ // before the call executes.
+ if (render_first_frame_and_stop_ && !posted_maybe_stop_after_first_paint_) {
+ posted_maybe_stop_after_first_paint_ = true;
+ task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&VideoRendererImpl::MaybeStopSinkAfterFirstPaint,
+ weak_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(250));
+ }
+
+ // Always post this task, it will acquire new frames if necessary and since it
+ // happens on another thread, even if we don't have room in the queue now, by
+ // the time it runs (may be delayed up to 50ms for complex decodes!) we might.
+ task_runner_->PostTask(FROM_HERE, base::Bind(&VideoRendererImpl::AttemptRead,
+ weak_factory_.GetWeakPtr()));
+
+ return result;
+}
+
+void VideoRendererImpl::OnFrameDropped() {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(use_new_video_renderering_path_);
+ algorithm_->OnLastFrameDropped();
+}
+
+void VideoRendererImpl::CreateVideoThread() {
+ // This may fail and cause a crash if there are too many threads created in
+ // the current process. See http://crbug.com/443291
+ CHECK(base::PlatformThread::Create(0, this, &thread_));
+
+#if defined(OS_WIN)
+ // Bump up our priority so our sleeping is more accurate.
+ // TODO(scherkus): find out if this is necessary, but it seems to help.
+ ::SetThreadPriority(thread_.platform_handle(), THREAD_PRIORITY_ABOVE_NORMAL);
+#endif // defined(OS_WIN)
+}
+
+void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kInitializing);
+
+ if (!success) {
+ state_ = kUninitialized;
+ base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
+ return;
+ }
+
+ // We're all good! Consider ourselves flushed. (ThreadMain() should never
+ // see us in the kUninitialized state).
+ // Since we had an initial Preroll(), we consider ourself flushed, because we
+ // have not populated any buffers yet.
+ state_ = kFlushed;
+
+ if (use_new_video_renderering_path_) {
+ algorithm_.reset(new VideoRendererAlgorithm(wall_clock_time_cb_));
+ if (!drop_frames_)
+ algorithm_->disable_frame_dropping();
+ } else {
+ CreateVideoThread();
+ }
+
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
+}
+
+// PlatformThread::Delegate implementation.
+void VideoRendererImpl::ThreadMain() {
+ DCHECK(!use_new_video_renderering_path_);
+ base::PlatformThread::SetName("CrVideoRenderer");
+
+ // The number of milliseconds to idle when we do not have anything to do.
+ // Nothing special about the value, other than we're being more OS-friendly
+ // than sleeping for 1 millisecond.
+ //
+ // TODO(scherkus): switch to pure event-driven frame timing instead of this
+ // kIdleTimeDelta business http://crbug.com/106874
+ const base::TimeDelta kIdleTimeDelta =
+ base::TimeDelta::FromMilliseconds(10);
+
+ for (;;) {
+ base::AutoLock auto_lock(lock_);
+
+ // Thread exit condition.
+ if (is_shutting_down_)
+ return;
+
+ // Remain idle as long as we're not playing.
+ if (state_ != kPlaying || buffering_state_ != BUFFERING_HAVE_ENOUGH) {
+ UpdateStatsAndWait_Locked(kIdleTimeDelta);
+ continue;
+ }
+
+ base::TimeTicks now = tick_clock_->NowTicks();
+
+ // Remain idle until we have the next frame ready for rendering.
+ if (ready_frames_.empty()) {
+ base::TimeDelta wait_time = kIdleTimeDelta;
+ if (received_end_of_stream_) {
+ if (!rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
+ task_runner_->PostTask(FROM_HERE, ended_cb_);
+ }
+ } else if (now >= latest_possible_paint_time_) {
+ // Declare HAVE_NOTHING if we don't have another frame by the time we
+ // are ready to paint the next one.
+ buffering_state_ = BUFFERING_HAVE_NOTHING;
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(buffering_state_cb_, BUFFERING_HAVE_NOTHING));
+ } else {
+ wait_time = std::min(kIdleTimeDelta, latest_possible_paint_time_ - now);
+ }
+
+ UpdateStatsAndWait_Locked(wait_time);
+ continue;
+ }
+
+ base::TimeTicks target_paint_time =
+ ConvertMediaTimestamp(ready_frames_.front()->timestamp());
+
+ // If media time has stopped, don't attempt to paint any more frames.
+ if (target_paint_time.is_null()) {
+ UpdateStatsAndWait_Locked(kIdleTimeDelta);
+ continue;
+ }
+
+ // Deadline is defined as the duration between this frame and the next
+ // frame, using the delta between this frame and the previous frame as the
+ // assumption for frame duration.
+ //
+ // TODO(scherkus): This can be vastly improved. Use a histogram to measure
+ // the accuracy of our frame timing code. http://crbug.com/149829
+ if (last_media_time_.is_null()) {
+ latest_possible_paint_time_ = now;
+ } else {
+ base::TimeDelta duration = target_paint_time - last_media_time_;
+ latest_possible_paint_time_ = target_paint_time + duration;
+ }
+
+ // Remain idle until we've reached our target paint window.
+ if (now < target_paint_time) {
+ UpdateStatsAndWait_Locked(
+ std::min(target_paint_time - now, kIdleTimeDelta));
+ continue;
+ }
+
+ if (ready_frames_.size() > 1 && now > latest_possible_paint_time_ &&
+ drop_frames_) {
+ DropNextReadyFrame_Locked();
+ continue;
+ }
+
+ // Congratulations! You've made it past the video frame timing gauntlet.
+ //
+    // At this point enough time has passed that the next frame is ready for
+    // rendering.
+ PaintNextReadyFrame_Locked();
+ }
+}
+
+void VideoRendererImpl::SetTickClockForTesting(
+ scoped_ptr<base::TickClock> tick_clock) {
+ tick_clock_.swap(tick_clock);
+}
+
+void VideoRendererImpl::OnTimeStateChanged(bool time_progressing) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ time_progressing_ = time_progressing;
+
+ // WARNING: Do not attempt to use |lock_| here as this may be a reentrant call
+ // in response to callbacks firing above.
+
+ if (!use_new_video_renderering_path_ || sink_started_ == time_progressing_)
+ return;
+
+ if (time_progressing_) {
+ // If only an EOS frame came in after a seek, the renderer may not have
+ // received the ended event yet though we've posted it.
+ if (!rendered_end_of_stream_)
+ StartSink();
+ } else {
+ StopSink();
+ }
+}
+
+void VideoRendererImpl::PaintNextReadyFrame_Locked() {
+ DCHECK(!use_new_video_renderering_path_);
+ lock_.AssertAcquired();
+
+ scoped_refptr<VideoFrame> next_frame = ready_frames_.front();
+ ready_frames_.pop_front();
+
+ last_media_time_ = ConvertMediaTimestamp(next_frame->timestamp());
+
+ paint_cb_.Run(next_frame);
+
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
+}
+
+void VideoRendererImpl::DropNextReadyFrame_Locked() {
+ DCHECK(!use_new_video_renderering_path_);
+ TRACE_EVENT0("media", "VideoRendererImpl:frameDropped");
+
+ lock_.AssertAcquired();
+
+ last_media_time_ = ConvertMediaTimestamp(ready_frames_.front()->timestamp());
+
+ ready_frames_.pop_front();
+ frames_dropped_++;
+
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
+}
+
+void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
+ const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ bool start_sink = false;
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(state_, kUninitialized);
+ DCHECK_NE(state_, kFlushed);
+
+ CHECK(pending_read_);
+ pending_read_ = false;
+
+ if (status == VideoFrameStream::DECODE_ERROR ||
+ status == VideoFrameStream::DECRYPT_ERROR) {
+ DCHECK(!frame.get());
+ PipelineStatus error = PIPELINE_ERROR_DECODE;
+ if (status == VideoFrameStream::DECRYPT_ERROR)
+ error = PIPELINE_ERROR_DECRYPT;
+ task_runner_->PostTask(FROM_HERE, base::Bind(error_cb_, error));
+ return;
+ }
+
+ // Already-queued VideoFrameStream ReadCB's can fire after various state
+ // transitions have happened; in that case just drop those frames
+ // immediately.
+ if (state_ == kFlushing)
+ return;
+
+ DCHECK_EQ(state_, kPlaying);
+
+ // Can happen when demuxers are preparing for a new Seek().
+ if (!frame.get()) {
+ DCHECK_EQ(status, VideoFrameStream::DEMUXER_READ_ABORTED);
+ return;
+ }
+
+ if (frame->end_of_stream()) {
+ DCHECK(!received_end_of_stream_);
+ received_end_of_stream_ = true;
+
+ // See if we can fire EOS immediately instead of waiting for Render().
+ if (use_new_video_renderering_path_)
+ MaybeFireEndedCallback();
+ } else {
+ // Maintain the latest frame decoded so the correct frame is displayed
+ // after prerolling has completed.
+ if (frame->timestamp() <= start_timestamp_) {
+ if (use_new_video_renderering_path_)
+ algorithm_->Reset();
+ ready_frames_.clear();
+ }
+ AddReadyFrame_Locked(frame);
+ }
+
+ // Signal buffering state if we've met our conditions for having enough
+ // data.
+ if (buffering_state_ != BUFFERING_HAVE_ENOUGH && HaveEnoughData_Locked()) {
+ TransitionToHaveEnough_Locked();
+ if (use_new_video_renderering_path_ && !sink_started_ &&
+ !rendered_end_of_stream_) {
+ start_sink = true;
+ render_first_frame_and_stop_ = true;
+ posted_maybe_stop_after_first_paint_ = false;
+ }
+ }
+
+ // Background rendering updates may not be ticking fast enough by itself to
+ // remove expired frames, so give it a boost here by ensuring we don't exit
+ // the decoding cycle too early.
+ if (was_background_rendering_) {
+ DCHECK(use_new_video_renderering_path_);
+ algorithm_->RemoveExpiredFrames(tick_clock_->NowTicks());
+ }
+
+ // Always request more decoded video if we have capacity. This serves two
+ // purposes:
+ // 1) Prerolling while paused
+ // 2) Keeps decoding going if video rendering thread starts falling behind
+ AttemptRead_Locked();
+ }
+
+ // If time is progressing, the sink has already been started; this may be true
+ // if we have previously underflowed, yet weren't stopped because of audio.
+ if (use_new_video_renderering_path_ && start_sink) {
+ DCHECK(!sink_started_);
+ StartSink();
+ }
+}
+
+bool VideoRendererImpl::HaveEnoughData_Locked() {
+ DCHECK_EQ(state_, kPlaying);
+
+ if (received_end_of_stream_ || !video_frame_stream_->CanReadWithoutStalling())
+ return true;
+
+ if (HaveReachedBufferingCap())
+ return true;
+
+ if (use_new_video_renderering_path_ && was_background_rendering_ &&
+ frames_decoded_) {
+ return true;
+ }
+
+ if (!low_delay_)
+ return false;
+
+ return ready_frames_.size() > 0 ||
+ (use_new_video_renderering_path_ && algorithm_->frames_queued() > 0);
+}
+
+void VideoRendererImpl::TransitionToHaveEnough_Locked() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+
+ if (!ready_frames_.empty()) {
+ DCHECK(!use_new_video_renderering_path_);
+    // Because the clock might remain paused for an undetermined amount
+ // of time (e.g., seeking while paused), paint the first frame.
+ PaintNextReadyFrame_Locked();
+ }
+
+ buffering_state_ = BUFFERING_HAVE_ENOUGH;
+ buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
+}
+
+void VideoRendererImpl::TransitionToHaveNothing() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ base::AutoLock auto_lock(lock_);
+ if (buffering_state_ != BUFFERING_HAVE_ENOUGH || HaveEnoughData_Locked())
+ return;
+
+ buffering_state_ = BUFFERING_HAVE_NOTHING;
+ buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+}
+
+void VideoRendererImpl::AddReadyFrame_Locked(
+ const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ lock_.AssertAcquired();
+ DCHECK(!frame->end_of_stream());
+
+ frames_decoded_++;
+
+ if (use_new_video_renderering_path_) {
+ algorithm_->EnqueueFrame(frame);
+ return;
+ }
+
+ ready_frames_.push_back(frame);
+ DCHECK_LE(ready_frames_.size(),
+ static_cast<size_t>(limits::kMaxVideoFrames));
+
+ // Avoid needlessly waking up |thread_| unless playing.
+ if (state_ == kPlaying)
+ frame_available_.Signal();
+}
+
+void VideoRendererImpl::AttemptRead() {
+ base::AutoLock auto_lock(lock_);
+ AttemptRead_Locked();
+}
+
+void VideoRendererImpl::AttemptRead_Locked() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ lock_.AssertAcquired();
+
+ if (pending_read_ || received_end_of_stream_)
+ return;
+
+ if (HaveReachedBufferingCap())
+ return;
+
+ switch (state_) {
+ case kPlaying:
+ pending_read_ = true;
+ video_frame_stream_->Read(base::Bind(&VideoRendererImpl::FrameReady,
+ weak_factory_.GetWeakPtr()));
+ return;
+
+ case kUninitialized:
+ case kInitializing:
+ case kFlushing:
+ case kFlushed:
+ return;
+ }
+}
+
+void VideoRendererImpl::OnVideoFrameStreamResetDone() {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(kFlushing, state_);
+ DCHECK(!pending_read_);
+ DCHECK(ready_frames_.empty());
+ DCHECK(!received_end_of_stream_);
+ DCHECK(!rendered_end_of_stream_);
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+
+ state_ = kFlushed;
+ latest_possible_paint_time_ = last_media_time_ = base::TimeTicks();
+ base::ResetAndReturn(&flush_cb_).Run();
+}
+
+void VideoRendererImpl::UpdateStatsAndWait_Locked(
+ base::TimeDelta wait_duration) {
+ lock_.AssertAcquired();
+ DCHECK_GE(frames_decoded_, 0);
+ DCHECK_GE(frames_dropped_, 0);
+
+ if (frames_decoded_ || frames_dropped_) {
+ PipelineStatistics statistics;
+ statistics.video_frames_decoded = frames_decoded_;
+ statistics.video_frames_dropped = frames_dropped_;
+ task_runner_->PostTask(FROM_HERE, base::Bind(statistics_cb_, statistics));
+
+ frames_decoded_ = 0;
+ frames_dropped_ = 0;
+ }
+
+ if (wait_duration > base::TimeDelta())
+ frame_available_.TimedWait(wait_duration);
+}
+
+void VideoRendererImpl::MaybeStopSinkAfterFirstPaint() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(use_new_video_renderering_path_);
+
+ {
+ base::AutoLock auto_lock(lock_);
+ render_first_frame_and_stop_ = false;
+ }
+
+ if (!time_progressing_ && sink_started_)
+ StopSink();
+}
+
+bool VideoRendererImpl::HaveReachedBufferingCap() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ const size_t kMaxVideoFrames = limits::kMaxVideoFrames;
+
+ if (use_new_video_renderering_path_) {
+ // When the display rate is less than the frame rate, the effective frames
+ // queued may be much smaller than the actual number of frames queued. Here
+ // we ensure that frames_queued() doesn't get excessive.
+ return algorithm_->EffectiveFramesQueued() >= kMaxVideoFrames ||
+ algorithm_->frames_queued() >= 3 * kMaxVideoFrames;
+ }
+
+ return ready_frames_.size() >= kMaxVideoFrames;
+}
+
+void VideoRendererImpl::StartSink() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_GT(algorithm_->frames_queued(), 0u);
+ sink_->Start(this);
+ sink_started_ = true;
+ was_background_rendering_ = false;
+}
+
+void VideoRendererImpl::StopSink() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ sink_->Stop();
+ sink_started_ = false;
+ was_background_rendering_ = false;
+}
+
+size_t VideoRendererImpl::MaybeFireEndedCallback() {
+ // If there's only one frame in the video or Render() was never called, the
+ // algorithm will have one frame linger indefinitely. So in cases where the
+ // frame duration is unknown and we've received EOS, fire it once we get down
+ // to a single frame.
+ const size_t effective_frames = algorithm_->EffectiveFramesQueued();
+
+ // Don't fire ended if we haven't received EOS or have already done so.
+ if (!received_end_of_stream_ || rendered_end_of_stream_)
+ return effective_frames;
+
+ // Don't fire ended if time isn't moving and we have frames.
+ if (!time_progressing_ && algorithm_->frames_queued())
+ return effective_frames;
+
+ // Fire ended if we have no more effective frames or only ever had one frame.
+ if (!effective_frames ||
+ (algorithm_->frames_queued() == 1u &&
+ algorithm_->average_frame_duration() == base::TimeDelta())) {
+ rendered_end_of_stream_ = true;
+ task_runner_->PostTask(FROM_HERE, ended_cb_);
+ }
+
+ return effective_frames;
+}
+
+base::TimeTicks VideoRendererImpl::ConvertMediaTimestamp(
+ base::TimeDelta media_time) {
+ std::vector<base::TimeDelta> media_times(1, media_time);
+ std::vector<base::TimeTicks> wall_clock_times;
+ if (!wall_clock_time_cb_.Run(media_times, &wall_clock_times))
+ return base::TimeTicks();
+ return wall_clock_times[0];
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_renderer_impl.h b/chromium/media/renderers/video_renderer_impl.h
index 557a88475e4..a2e89c557d4 100644
--- a/chromium/media/filters/video_renderer_impl.h
+++ b/chromium/media/renderers/video_renderer_impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
-#define MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
+#ifndef MEDIA_RENDERERS_VIDEO_RENDERER_IMPL_H_
+#define MEDIA_RENDERERS_VIDEO_RENDERER_IMPL_H_
#include <deque>
@@ -14,6 +14,7 @@
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
+#include "base/timer/timer.h"
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
#include "media/base/media_log.h"
@@ -21,10 +22,15 @@
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"
#include "media/base/video_renderer.h"
+#include "media/base/video_renderer_sink.h"
#include "media/filters/decoder_stream.h"
+#include "media/filters/video_renderer_algorithm.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
+#include "media/video/gpu_memory_buffer_video_frame_pool.h"
namespace base {
class SingleThreadTaskRunner;
+class TickClock;
}
namespace media {
@@ -35,15 +41,11 @@ namespace media {
// ready for rendering.
class MEDIA_EXPORT VideoRendererImpl
: public VideoRenderer,
+ public NON_EXPORTED_BASE(VideoRendererSink::RenderCallback),
public base::PlatformThread::Delegate {
public:
- typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
-
// |decoders| contains the VideoDecoders to use when initializing.
//
- // |paint_cb| is executed on the video frame timing thread whenever a new
- // frame is available for painting.
- //
// Implementors should avoid doing any sort of heavy work in this method and
// instead post a task to a common/worker thread to handle rendering. Slowing
// down the video thread may result in losing synchronization with audio.
@@ -51,29 +53,46 @@ class MEDIA_EXPORT VideoRendererImpl
// Setting |drop_frames_| to true causes the renderer to drop expired frames.
VideoRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ VideoRendererSink* sink,
ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- const PaintCB& paint_cb,
bool drop_frames,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<MediaLog>& media_log);
~VideoRendererImpl() override;
// VideoRenderer implementation.
void Initialize(DemuxerStream* stream,
- bool low_delay,
const PipelineStatusCB& init_cb,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
const StatisticsCB& statistics_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb) override;
+ const TimeSource::WallClockTimeCB& wall_clock_time_cb,
+ const base::Closure& waiting_for_decryption_key_cb) override;
void Flush(const base::Closure& callback) override;
void StartPlayingFrom(base::TimeDelta timestamp) override;
+ void OnTimeStateChanged(bool time_progressing) override;
// PlatformThread::Delegate implementation.
void ThreadMain() override;
+ void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
+
+ // VideoRendererSink::RenderCallback implementation.
+ scoped_refptr<VideoFrame> Render(base::TimeTicks deadline_min,
+ base::TimeTicks deadline_max,
+ bool background_rendering) override;
+ void OnFrameDropped() override;
+
+ void disable_new_video_renderer_for_testing() {
+ use_new_video_renderering_path_ = false;
+ }
+
private:
+ // Creates a dedicated |thread_| for video rendering.
+ void CreateVideoThread();
+
// Callback for |video_frame_stream_| initialization.
void OnVideoFrameStreamInitialized(bool success);
@@ -108,25 +127,60 @@ class MEDIA_EXPORT VideoRendererImpl
// Note that having enough data may be due to reaching end of stream.
bool HaveEnoughData_Locked();
void TransitionToHaveEnough_Locked();
+ void TransitionToHaveNothing();
// Runs |statistics_cb_| with |frames_decoded_| and |frames_dropped_|, resets
// them to 0, and then waits on |frame_available_| for up to the
// |wait_duration|.
void UpdateStatsAndWait_Locked(base::TimeDelta wait_duration);
+ // Called after we've painted the first frame. If |time_progressing_| is
+ // false it Stop() on |sink_|.
+ void MaybeStopSinkAfterFirstPaint();
+
+ // Returns true if there is no more room for additional buffered frames.
+ bool HaveReachedBufferingCap();
+
+ // Starts or stops |sink_| respectively. Do not call while |lock_| is held.
+ void StartSink();
+ void StopSink();
+
+ // Fires |ended_cb_| if there are no remaining usable frames and
+ // |received_end_of_stream_| is true. Sets |rendered_end_of_stream_| if it
+ // does so. Returns algorithm_->EffectiveFramesQueued().
+ size_t MaybeFireEndedCallback();
+
+ // Helper method for converting a single media timestamp to wall clock time.
+ base::TimeTicks ConvertMediaTimestamp(base::TimeDelta media_timestamp);
+
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ // Enables the use of VideoRendererAlgorithm and VideoRendererSink for frame
+ // rendering instead of using a thread in a sleep-loop. Set via the command
+ // line flag kEnableNewVideoRenderer or via test methods.
+ bool use_new_video_renderering_path_;
+
+ // Sink which calls into VideoRendererImpl via Render() for video frames. Do
+ // not call any methods on the sink while |lock_| is held or the two threads
+ // might deadlock. Do not call Start() or Stop() on the sink directly, use
+ // StartSink() and StopSink() to ensure background rendering is started.
+ VideoRendererSink* const sink_;
+ bool sink_started_;
+
// Used for accessing data members.
base::Lock lock_;
// Provides video frames to VideoRendererImpl.
scoped_ptr<VideoFrameStream> video_frame_stream_;
+ // Pool of GpuMemoryBuffers and resources used to create hardware frames.
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_;
+
// Flag indicating low-delay mode.
bool low_delay_;
// Queue of incoming frames yet to be painted.
- typedef std::deque<scoped_refptr<VideoFrame> > VideoFrameQueue;
+ typedef std::deque<scoped_refptr<VideoFrame>> VideoFrameQueue;
VideoFrameQueue ready_frames_;
// Keeps track of whether we received the end of stream buffer and finished
@@ -184,21 +238,20 @@ class MEDIA_EXPORT VideoRendererImpl
BufferingStateCB buffering_state_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
- TimeDeltaCB get_time_cb_;
+ TimeSource::WallClockTimeCB wall_clock_time_cb_;
base::TimeDelta start_timestamp_;
// Embedder callback for notifying a new frame is available for painting.
PaintCB paint_cb_;
- // The timestamp of the last frame removed from the |ready_frames_| queue,
- // either for calling |paint_cb_| or for dropping. Set to kNoTimestamp()
- // during flushing.
- base::TimeDelta last_timestamp_;
+  // The wallclock time of the last frame removed from the |ready_frames_|
+ // queue, either for calling |paint_cb_| or for dropping. Set to null during
+ // flushing.
+ base::TimeTicks last_media_time_;
- // The timestamp of the last successfully painted frame. Set to kNoTimestamp()
- // during flushing.
- base::TimeDelta last_painted_timestamp_;
+ // Equivalent to |last_media_time_| + the estimated duration of the frame.
+ base::TimeTicks latest_possible_paint_time_;
// Keeps track of the number of frames decoded and dropped since the
// last call to |statistics_cb_|. These must be accessed under lock.
@@ -207,6 +260,26 @@ class MEDIA_EXPORT VideoRendererImpl
bool is_shutting_down_;
+ scoped_ptr<base::TickClock> tick_clock_;
+
+ // Algorithm for selecting which frame to render; manages frames and all
+ // timing related information.
+ scoped_ptr<VideoRendererAlgorithm> algorithm_;
+
+ // Indicates that Render() was called with |background_rendering| set to true,
+ // so we've entered a background rendering mode where dropped frames are not
+ // counted.
+ bool was_background_rendering_;
+
+ // Indicates whether or not media time is currently progressing or not.
+ bool time_progressing_;
+
+ // Indicates that Render() should only render the first frame and then request
+ // that the sink be stopped. |posted_maybe_stop_after_first_paint_| is used
+ // to avoid repeated task posts.
+ bool render_first_frame_and_stop_;
+ bool posted_maybe_stop_after_first_paint_;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<VideoRendererImpl> weak_factory_;
@@ -215,4 +288,4 @@ class MEDIA_EXPORT VideoRendererImpl
} // namespace media
-#endif // MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
+#endif // MEDIA_RENDERERS_VIDEO_RENDERER_IMPL_H_
diff --git a/chromium/media/filters/video_renderer_impl_unittest.cc b/chromium/media/renderers/video_renderer_impl_unittest.cc
index 1a26f0c1306..aa494cb49ce 100644
--- a/chromium/media/filters/video_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/video_renderer_impl_unittest.cc
@@ -14,18 +14,22 @@
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
+#include "base/test/simple_test_tick_clock.h"
#include "media/base/data_buffer.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/limits.h"
#include "media/base/mock_filters.h"
+#include "media/base/null_video_sink.h"
#include "media/base/test_helpers.h"
#include "media/base/video_frame.h"
-#include "media/filters/video_renderer_impl.h"
+#include "media/base/wall_clock_time_source.h"
+#include "media/renderers/video_renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
+using ::testing::Mock;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
@@ -42,21 +46,33 @@ MATCHER_P(HasTimestamp, ms, "") {
return arg->timestamp().InMilliseconds() == ms;
}
-class VideoRendererImplTest : public ::testing::Test {
+class VideoRendererImplTest : public testing::TestWithParam<bool> {
public:
VideoRendererImplTest()
- : decoder_(new MockVideoDecoder()),
+ : tick_clock_(new base::SimpleTestTickClock()),
+ decoder_(new MockVideoDecoder()),
demuxer_stream_(DemuxerStream::VIDEO) {
ScopedVector<VideoDecoder> decoders;
decoders.push_back(decoder_);
- renderer_.reset(new VideoRendererImpl(
- message_loop_.message_loop_proxy(),
- decoders.Pass(),
- media::SetDecryptorReadyCB(),
- base::Bind(&StrictMock<MockCB>::Display, base::Unretained(&mock_cb_)),
- true,
- new MediaLog()));
+ null_video_sink_.reset(new NullVideoSink(
+ false, base::TimeDelta::FromSecondsD(1.0 / 60),
+ base::Bind(&MockCB::FrameReceived, base::Unretained(&mock_cb_)),
+ message_loop_.task_runner()));
+
+ renderer_.reset(new VideoRendererImpl(message_loop_.message_loop_proxy(),
+ null_video_sink_.get(),
+ decoders.Pass(), true,
+ nullptr, // gpu_factories
+ new MediaLog()));
+ if (!GetParam())
+ renderer_->disable_new_video_renderer_for_testing();
+ renderer_->SetTickClockForTesting(scoped_ptr<base::TickClock>(tick_clock_));
+ null_video_sink_->set_tick_clock_for_testing(tick_clock_);
+ time_source_.set_tick_clock_for_testing(tick_clock_);
+
+ // Start wallclock time at a non-zero value.
+ AdvanceWallclockTimeInMs(12345);
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
@@ -94,25 +110,31 @@ class VideoRendererImplTest : public ::testing::Test {
void CallInitialize(const PipelineStatusCB& status_cb,
bool low_delay,
PipelineStatus decoder_status) {
- EXPECT_CALL(*decoder_, Initialize(_, _, _, _)).WillOnce(
- DoAll(SaveArg<3>(&output_cb_), RunCallback<2>(decoder_status)));
+ if (low_delay)
+ demuxer_stream_.set_liveness(DemuxerStream::LIVENESS_LIVE);
+ EXPECT_CALL(*decoder_, Initialize(_, _, _, _))
+ .WillOnce(
+ DoAll(SaveArg<3>(&output_cb_), RunCallback<2>(decoder_status)));
+ EXPECT_CALL(*this, OnWaitingForDecryptionKey()).Times(0);
renderer_->Initialize(
- &demuxer_stream_,
- low_delay,
- status_cb,
+ &demuxer_stream_, status_cb, media::SetDecryptorReadyCB(),
base::Bind(&VideoRendererImplTest::OnStatisticsUpdate,
base::Unretained(this)),
base::Bind(&StrictMock<MockCB>::BufferingStateChange,
base::Unretained(&mock_cb_)),
- ended_event_.GetClosure(),
- error_event_.GetPipelineStatusCB(),
- base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)));
+ ended_event_.GetClosure(), error_event_.GetPipelineStatusCB(),
+ base::Bind(&WallClockTimeSource::GetWallClockTimes,
+ base::Unretained(&time_source_)),
+ base::Bind(&VideoRendererImplTest::OnWaitingForDecryptionKey,
+ base::Unretained(this)));
}
void StartPlayingFrom(int milliseconds) {
SCOPED_TRACE(base::StringPrintf("StartPlayingFrom(%d)", milliseconds));
- renderer_->StartPlayingFrom(
- base::TimeDelta::FromMilliseconds(milliseconds));
+ const base::TimeDelta media_time =
+ base::TimeDelta::FromMilliseconds(milliseconds);
+ time_source_.SetMediaTime(media_time);
+ renderer_->StartPlayingFrom(media_time);
message_loop_.RunUntilIdle();
}
@@ -238,32 +260,48 @@ class VideoRendererImplTest : public ::testing::Test {
base::Bind(base::ResetAndReturn(&decode_cb_), VideoDecoder::kOk));
}
+ void AdvanceWallclockTimeInMs(int time_ms) {
+ DCHECK_EQ(&message_loop_, base::MessageLoop::current());
+ base::AutoLock l(lock_);
+ tick_clock_->Advance(base::TimeDelta::FromMilliseconds(time_ms));
+ }
+
void AdvanceTimeInMs(int time_ms) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
base::AutoLock l(lock_);
time_ += base::TimeDelta::FromMilliseconds(time_ms);
+ time_source_.StopTicking();
+ time_source_.SetMediaTime(time_);
+ time_source_.StartTicking();
+ }
+
+ bool has_ended() const {
+ return ended_event_.is_signaled();
}
protected:
// Fixture members.
scoped_ptr<VideoRendererImpl> renderer_;
+ base::SimpleTestTickClock* tick_clock_; // Owned by |renderer_|.
MockVideoDecoder* decoder_; // Owned by |renderer_|.
NiceMock<MockDemuxerStream> demuxer_stream_;
// Use StrictMock<T> to catch missing/extra callbacks.
class MockCB {
public:
- MOCK_METHOD1(Display, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(FrameReceived, void(const scoped_refptr<VideoFrame>&));
MOCK_METHOD1(BufferingStateChange, void(BufferingState));
};
StrictMock<MockCB> mock_cb_;
- private:
- base::TimeDelta GetTime() {
- base::AutoLock l(lock_);
- return time_;
- }
+ // Must be destroyed before |renderer_| since they share |tick_clock_|.
+ scoped_ptr<NullVideoSink> null_video_sink_;
+
+ PipelineStatistics last_pipeline_statistics_;
+ WallClockTimeSource time_source_;
+
+ private:
void DecodeRequested(const scoped_refptr<DecoderBuffer>& buffer,
const VideoDecoder::DecodeCB& decode_cb) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
@@ -291,7 +329,11 @@ class VideoRendererImplTest : public ::testing::Test {
message_loop_.PostTask(FROM_HERE, callback);
}
- void OnStatisticsUpdate(const PipelineStatistics& stats) {}
+ void OnStatisticsUpdate(const PipelineStatistics& stats) {
+ last_pipeline_statistics_ = stats;
+ }
+
+ MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
base::MessageLoop message_loop_;
@@ -316,38 +358,56 @@ class VideoRendererImplTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(VideoRendererImplTest);
};
-TEST_F(VideoRendererImplTest, DoNothing) {
+TEST_P(VideoRendererImplTest, DoNothing) {
// Test that creation and deletion doesn't depend on calls to Initialize()
// and/or Destroy().
}
-TEST_F(VideoRendererImplTest, DestroyWithoutInitialize) {
+TEST_P(VideoRendererImplTest, DestroyWithoutInitialize) {
Destroy();
}
-TEST_F(VideoRendererImplTest, Initialize) {
+TEST_P(VideoRendererImplTest, Initialize) {
Initialize();
Destroy();
}
-TEST_F(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
+TEST_P(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(0);
Destroy();
}
-TEST_F(VideoRendererImplTest, DestroyWhileInitializing) {
+TEST_P(VideoRendererImplTest, InitializeAndEndOfStream) {
+ Initialize();
+ StartPlayingFrom(0);
+ WaitForPendingRead();
+ {
+ SCOPED_TRACE("Waiting for BUFFERING_HAVE_ENOUGH");
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
+ .WillOnce(RunClosure(event.GetClosure()));
+ SatisfyPendingReadWithEndOfStream();
+ event.RunAndWait();
+ }
+ // Firing a time state changed to true should be ignored...
+ renderer_->OnTimeStateChanged(true);
+ EXPECT_FALSE(null_video_sink_->is_started());
+ Destroy();
+}
+
+TEST_P(VideoRendererImplTest, DestroyWhileInitializing) {
CallInitialize(NewExpectedStatusCB(PIPELINE_ERROR_ABORT), false, PIPELINE_OK);
Destroy();
}
-TEST_F(VideoRendererImplTest, DestroyWhileFlushing) {
+TEST_P(VideoRendererImplTest, DestroyWhileFlushing) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(0);
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING));
@@ -355,16 +415,16 @@ TEST_F(VideoRendererImplTest, DestroyWhileFlushing) {
Destroy();
}
-TEST_F(VideoRendererImplTest, Play) {
+TEST_P(VideoRendererImplTest, Play) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(0);
Destroy();
}
-TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
+TEST_P(VideoRendererImplTest, FlushWithNothingBuffered) {
Initialize();
StartPlayingFrom(0);
@@ -374,12 +434,15 @@ TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
Destroy();
}
-TEST_F(VideoRendererImplTest, DecodeError_Playing) {
+TEST_P(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(_)).Times(testing::AtLeast(1));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(0);
+ renderer_->OnTimeStateChanged(true);
+ time_source_.StartTicking();
+ AdvanceTimeInMs(10);
QueueFrames("error");
SatisfyPendingRead();
@@ -387,50 +450,50 @@ TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Destroy();
}
-TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
+TEST_P(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
Initialize();
QueueFrames("error");
StartPlayingFrom(0);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlayingFrom_Exact) {
+TEST_P(VideoRendererImplTest, StartPlayingFrom_Exact) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(60)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(60);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
+TEST_P(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(50)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(50)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(59);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
+TEST_P(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(60)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(61);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
+TEST_P(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
// In low-delay mode only one frame is required to finish preroll.
InitializeWithLowDelay(true);
QueueFrames("0");
// Expect some amount of have enough/nothing due to only requiring one frame.
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
.Times(AnyNumber());
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
@@ -440,8 +503,11 @@ TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
QueueFrames("10");
SatisfyPendingRead();
+ renderer_->OnTimeStateChanged(true);
+ time_source_.StartTicking();
+
WaitableMessageLoopEvent event;
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(10)))
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(10)))
.WillOnce(RunClosure(event.GetClosure()));
AdvanceTimeInMs(10);
event.RunAndWait();
@@ -450,10 +516,10 @@ TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
}
// Verify that a late decoder response doesn't break invariants in the renderer.
-TEST_F(VideoRendererImplTest, DestroyDuringOutstandingRead) {
+TEST_P(VideoRendererImplTest, DestroyDuringOutstandingRead) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
StartPlayingFrom(0);
@@ -463,30 +529,65 @@ TEST_F(VideoRendererImplTest, DestroyDuringOutstandingRead) {
Destroy();
}
-TEST_F(VideoRendererImplTest, VideoDecoder_InitFailure) {
+TEST_P(VideoRendererImplTest, VideoDecoder_InitFailure) {
InitializeRenderer(DECODER_ERROR_NOT_SUPPORTED, false);
Destroy();
}
-TEST_F(VideoRendererImplTest, Underflow) {
+TEST_P(VideoRendererImplTest, Underflow) {
Initialize();
- QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
- EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlayingFrom(0);
+ QueueFrames("0 30 60 90");
+
+ {
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
+ .WillOnce(RunClosure(event.GetClosure()));
+ StartPlayingFrom(0);
+ event.RunAndWait();
+ Mock::VerifyAndClearExpectations(&mock_cb_);
+ }
+
+ renderer_->OnTimeStateChanged(true);
+
+ // Advance time slightly, but enough to exceed the duration of the last frame.
+ // Frames should be dropped and we should NOT signal having nothing.
+ {
+ SCOPED_TRACE("Waiting for frame drops");
+ WaitableMessageLoopEvent event;
- // Advance time slightly. Frames should be dropped and we should NOT signal
- // having nothing.
- AdvanceTimeInMs(100);
+ // Note: Starting the TimeSource will cause the old VideoRendererImpl to
+ // start rendering frames on its own thread, so the first frame may be
+ // received.
+ time_source_.StartTicking();
+ if (GetParam())
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(30))).Times(0);
+ else
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(30))).Times(AnyNumber());
+
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(60))).Times(0);
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(90)))
+ .WillOnce(RunClosure(event.GetClosure()));
+ AdvanceTimeInMs(91);
+
+ event.RunAndWait();
+ Mock::VerifyAndClearExpectations(&mock_cb_);
+ }
- // Advance time more. Now we should signal having nothing.
+ // Advance time more. Now we should signal having nothing. And put
+ // the last frame up for display.
{
SCOPED_TRACE("Waiting for BUFFERING_HAVE_NOTHING");
WaitableMessageLoopEvent event;
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
.WillOnce(RunClosure(event.GetClosure()));
- AdvanceTimeInMs(3000); // Must match kTimeToDeclareHaveNothing.
+ AdvanceTimeInMs(30);
+ // The old rendering path needs wall clock time to increase too.
+ if (!GetParam())
+ AdvanceWallclockTimeInMs(30);
+
event.RunAndWait();
+ Mock::VerifyAndClearExpectations(&mock_cb_);
}
// Receiving end of stream should signal having enough.
@@ -503,4 +604,121 @@ TEST_F(VideoRendererImplTest, Underflow) {
Destroy();
}
+// Verifies that the sink is stopped after rendering the first frame if
+// playback hasn't started.
+TEST_P(VideoRendererImplTest, RenderingStopsAfterFirstFrame) {
+ // This test is only for the new rendering path.
+ if (!GetParam())
+ return;
+
+ InitializeWithLowDelay(true);
+ QueueFrames("0");
+
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+
+ {
+ SCOPED_TRACE("Waiting for sink to stop.");
+ WaitableMessageLoopEvent event;
+
+ null_video_sink_->set_background_render(true);
+ null_video_sink_->set_stop_cb(event.GetClosure());
+ StartPlayingFrom(0);
+
+ EXPECT_TRUE(IsReadPending());
+ SatisfyPendingReadWithEndOfStream();
+
+ event.RunAndWait();
+ }
+
+ EXPECT_FALSE(has_ended());
+ Destroy();
+}
+
+// Verifies that the sink is stopped after rendering the first frame if
+// playback has started.
+TEST_P(VideoRendererImplTest, RenderingStopsAfterOneFrameWithEOS) {
+ // This test is only for the new rendering path.
+ if (!GetParam())
+ return;
+
+ InitializeWithLowDelay(true);
+ QueueFrames("0");
+
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+
+ {
+ SCOPED_TRACE("Waiting for sink to stop.");
+ WaitableMessageLoopEvent event;
+
+ null_video_sink_->set_stop_cb(event.GetClosure());
+ StartPlayingFrom(0);
+ renderer_->OnTimeStateChanged(true);
+
+ EXPECT_TRUE(IsReadPending());
+ SatisfyPendingReadWithEndOfStream();
+ WaitForEnded();
+
+ renderer_->OnTimeStateChanged(false);
+ event.RunAndWait();
+ }
+
+ Destroy();
+}
+
+// Tests the case where the video started and received a single Render() call,
+// then the video was put into the background.
+TEST_P(VideoRendererImplTest, RenderingStartedThenStopped) {
+ // This test is only for the new rendering path.
+ if (!GetParam())
+ return;
+
+ Initialize();
+ QueueFrames("0 30 60 90");
+
+ // Start the sink and wait for the first callback. Set statistics to a non
+ // zero value, once we have some decoded frames they should be overwritten.
+ last_pipeline_statistics_.video_frames_dropped = 1;
+ {
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)))
+ .WillOnce(RunClosure(event.GetClosure()));
+ StartPlayingFrom(0);
+ event.RunAndWait();
+ Mock::VerifyAndClearExpectations(&mock_cb_);
+ EXPECT_EQ(0u, last_pipeline_statistics_.video_frames_dropped);
+ }
+
+ renderer_->OnTimeStateChanged(true);
+ time_source_.StartTicking();
+
+ // Suspend all future callbacks and synthetically advance the media time,
+ // because this is a background render, we won't underflow by waiting until
+ // a pending read is ready.
+ null_video_sink_->set_background_render(true);
+ AdvanceTimeInMs(91);
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(90)));
+ WaitForPendingRead();
+ SatisfyPendingReadWithEndOfStream();
+
+ // If this wasn't background rendering mode, this would result in two frames
+ // being dropped, but since we set background render to true, none should be
+  // reported.
+ EXPECT_EQ(0u, last_pipeline_statistics_.video_frames_dropped);
+ EXPECT_EQ(4u, last_pipeline_statistics_.video_frames_decoded);
+
+ AdvanceTimeInMs(30);
+ WaitForEnded();
+ Destroy();
+}
+
+INSTANTIATE_TEST_CASE_P(OldVideoRenderer,
+ VideoRendererImplTest,
+ testing::Values(false));
+INSTANTIATE_TEST_CASE_P(NewVideoRenderer,
+ VideoRendererImplTest,
+ testing::Values(true));
+
} // namespace media
diff --git a/chromium/media/tools/player_x11/data_source_logger.cc b/chromium/media/tools/player_x11/data_source_logger.cc
deleted file mode 100644
index d09b6bf2e1a..00000000000
--- a/chromium/media/tools/player_x11/data_source_logger.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "media/tools/player_x11/data_source_logger.h"
-
-static void LogAndRunReadCB(
- int64 position, int size,
- const media::DataSource::ReadCB& read_cb, int result) {
- VLOG(1) << "Read(" << position << ", " << size << ") -> " << result;
- read_cb.Run(result);
-}
-
-DataSourceLogger::DataSourceLogger(
- scoped_ptr<media::DataSource> data_source,
- bool streaming)
- : data_source_(data_source.Pass()),
- streaming_(streaming) {
-}
-
-void DataSourceLogger::Stop() {
- VLOG(1) << "Stop()";
- data_source_->Stop();
-}
-
-void DataSourceLogger::Read(
- int64 position, int size, uint8* data,
- const media::DataSource::ReadCB& read_cb) {
- VLOG(1) << "Read(" << position << ", " << size << ")";
- data_source_->Read(position, size, data, base::Bind(
- &LogAndRunReadCB, position, size, read_cb));
-}
-
-bool DataSourceLogger::GetSize(int64* size_out) {
- bool success = data_source_->GetSize(size_out);
- VLOG(1) << "GetSize() -> " << (success ? "true" : "false")
- << ", " << *size_out;
- return success;
-}
-
-bool DataSourceLogger::IsStreaming() {
- if (streaming_) {
- VLOG(1) << "IsStreaming() -> true (overridden)";
- return true;
- }
-
- bool streaming = data_source_->IsStreaming();
- VLOG(1) << "IsStreaming() -> " << (streaming ? "true" : "false");
- return streaming;
-}
-
-void DataSourceLogger::SetBitrate(int bitrate) {
- VLOG(1) << "SetBitrate(" << bitrate << ")";
- data_source_->SetBitrate(bitrate);
-}
-
-DataSourceLogger::~DataSourceLogger() {}
diff --git a/chromium/media/tools/player_x11/data_source_logger.h b/chromium/media/tools/player_x11/data_source_logger.h
deleted file mode 100644
index 13fdc6067ef..00000000000
--- a/chromium/media/tools/player_x11/data_source_logger.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_PLAYER_X11_DATA_SOURCE_LOGGER_H_
-#define MEDIA_TOOLS_PLAYER_X11_DATA_SOURCE_LOGGER_H_
-
-#include "media/base/data_source.h"
-
-// Logs all DataSource operations to VLOG(1) for debugging purposes.
-class DataSourceLogger : public media::DataSource {
- public:
- // Constructs a DataSourceLogger to log operations against another DataSource.
- //
- // |data_source| must be initialized in advance.
- //
- // |streaming| when set to true will override the implementation
- // IsStreaming() to always return true, otherwise it will delegate to
- // |data_source|.
- DataSourceLogger(scoped_ptr<DataSource> data_source,
- bool force_streaming);
- ~DataSourceLogger() override;
-
- // media::DataSource implementation.
- void Stop() override;
- void Read(int64 position,
- int size,
- uint8* data,
- const media::DataSource::ReadCB& read_cb) override;
- bool GetSize(int64* size_out) override;
- bool IsStreaming() override;
- void SetBitrate(int bitrate) override;
-
- private:
- scoped_ptr<media::DataSource> data_source_;
- bool streaming_;
-
- DISALLOW_COPY_AND_ASSIGN(DataSourceLogger);
-};
-
-#endif // MEDIA_TOOLS_PLAYER_X11_DATA_SOURCE_LOGGER_H_
diff --git a/chromium/media/tools/player_x11/gl_video_renderer.cc b/chromium/media/tools/player_x11/gl_video_renderer.cc
deleted file mode 100644
index 5f233c47a9d..00000000000
--- a/chromium/media/tools/player_x11/gl_video_renderer.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/player_x11/gl_video_renderer.h"
-
-#include <X11/Xutil.h>
-
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/buffers.h"
-#include "media/base/video_frame.h"
-#include "media/base/yuv_convert.h"
-#include "ui/gl/gl_surface.h"
-
-enum { kNumYUVPlanes = 3 };
-
-static GLXContext InitGLContext(Display* display, Window window) {
- // Some versions of NVIDIA's GL libGL.so include a broken version of
- // dlopen/dlsym, and so linking it into chrome breaks it. So we dynamically
- // load it, and use glew to dynamically resolve symbols.
- // See http://code.google.com/p/chromium/issues/detail?id=16800
- if (!gfx::GLSurface::InitializeOneOff()) {
- LOG(ERROR) << "GLSurface::InitializeOneOff failed";
- return NULL;
- }
-
- XWindowAttributes attributes;
- XGetWindowAttributes(display, window, &attributes);
- XVisualInfo visual_info_template;
- visual_info_template.visualid = XVisualIDFromVisual(attributes.visual);
- int visual_info_count = 0;
- XVisualInfo* visual_info_list = XGetVisualInfo(display, VisualIDMask,
- &visual_info_template,
- &visual_info_count);
- GLXContext context = NULL;
- for (int i = 0; i < visual_info_count && !context; ++i) {
- context = glXCreateContext(display, visual_info_list + i, 0,
- True /* Direct rendering */);
- }
-
- XFree(visual_info_list);
- if (!context) {
- return NULL;
- }
-
- if (!glXMakeCurrent(display, window, context)) {
- glXDestroyContext(display, context);
- return NULL;
- }
-
- return context;
-}
-
-// Matrix used for the YUV to RGB conversion.
-static const float kYUV2RGB[9] = {
- 1.f, 0.f, 1.403f,
- 1.f, -.344f, -.714f,
- 1.f, 1.772f, 0.f,
-};
-
-// Vertices for a full screen quad.
-static const float kVertices[8] = {
- -1.f, 1.f,
- -1.f, -1.f,
- 1.f, 1.f,
- 1.f, -1.f,
-};
-
-// Pass-through vertex shader.
-static const char kVertexShader[] =
- "varying vec2 interp_tc;\n"
- "\n"
- "attribute vec4 in_pos;\n"
- "attribute vec2 in_tc;\n"
- "\n"
- "void main() {\n"
- " interp_tc = in_tc;\n"
- " gl_Position = in_pos;\n"
- "}\n";
-
-// YUV to RGB pixel shader. Loads a pixel from each plane and pass through the
-// matrix.
-static const char kFragmentShader[] =
- "varying vec2 interp_tc;\n"
- "\n"
- "uniform sampler2D y_tex;\n"
- "uniform sampler2D u_tex;\n"
- "uniform sampler2D v_tex;\n"
- "uniform mat3 yuv2rgb;\n"
- "\n"
- "void main() {\n"
- " float y = texture2D(y_tex, interp_tc).x;\n"
- " float u = texture2D(u_tex, interp_tc).r - .5;\n"
- " float v = texture2D(v_tex, interp_tc).r - .5;\n"
- " vec3 rgb = yuv2rgb * vec3(y, u, v);\n"
- " gl_FragColor = vec4(rgb, 1);\n"
- "}\n";
-
-// Buffer size for compile errors.
-static const unsigned int kErrorSize = 4096;
-
-GlVideoRenderer::GlVideoRenderer(Display* display, Window window)
- : display_(display),
- window_(window),
- gl_context_(NULL) {
-}
-
-GlVideoRenderer::~GlVideoRenderer() {
- glXMakeCurrent(display_, 0, NULL);
- glXDestroyContext(display_, gl_context_);
-}
-
-void GlVideoRenderer::Paint(
- const scoped_refptr<media::VideoFrame>& video_frame) {
- if (!gl_context_)
- Initialize(video_frame->coded_size(), video_frame->visible_rect());
-
- // Convert YUV frame to RGB.
- DCHECK(video_frame->format() == media::VideoFrame::YV12 ||
- video_frame->format() == media::VideoFrame::I420 ||
- video_frame->format() == media::VideoFrame::YV16);
- DCHECK(video_frame->stride(media::VideoFrame::kUPlane) ==
- video_frame->stride(media::VideoFrame::kVPlane));
-
- if (glXGetCurrentContext() != gl_context_ ||
- glXGetCurrentDrawable() != window_) {
- glXMakeCurrent(display_, window_, gl_context_);
- }
- for (unsigned int i = 0; i < kNumYUVPlanes; ++i) {
- unsigned int width = video_frame->stride(i);
- unsigned int height = video_frame->rows(i);
- glActiveTexture(GL_TEXTURE0 + i);
- glPixelStorei(GL_UNPACK_ROW_LENGTH, video_frame->stride(i));
- glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0,
- GL_LUMINANCE, GL_UNSIGNED_BYTE, video_frame->data(i));
- }
-
- glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
- glXSwapBuffers(display_, window_);
-}
-
-void GlVideoRenderer::Initialize(gfx::Size coded_size, gfx::Rect visible_rect) {
- CHECK(!gl_context_);
- VLOG(0) << "Initializing GL Renderer...";
-
- // Resize the window to fit that of the video.
- XResizeWindow(display_, window_, visible_rect.width(), visible_rect.height());
-
- gl_context_ = InitGLContext(display_, window_);
- CHECK(gl_context_) << "Failed to initialize GL context";
-
- // Create 3 textures, one for each plane, and bind them to different
- // texture units.
- glGenTextures(3, textures_);
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_2D, textures_[0]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glEnable(GL_TEXTURE_2D);
-
- glActiveTexture(GL_TEXTURE1);
- glBindTexture(GL_TEXTURE_2D, textures_[1]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glEnable(GL_TEXTURE_2D);
-
- glActiveTexture(GL_TEXTURE2);
- glBindTexture(GL_TEXTURE_2D, textures_[2]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glEnable(GL_TEXTURE_2D);
-
- GLuint program = glCreateProgram();
-
- // Create our YUV->RGB shader.
- GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
- const char* vs_source = kVertexShader;
- int vs_size = sizeof(kVertexShader);
- glShaderSource(vertex_shader, 1, &vs_source, &vs_size);
- glCompileShader(vertex_shader);
- int result = GL_FALSE;
- glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &result);
- if (!result) {
- char log[kErrorSize];
- int len = 0;
- glGetShaderInfoLog(vertex_shader, kErrorSize - 1, &len, log);
- log[kErrorSize - 1] = 0;
- LOG(FATAL) << log;
- }
- glAttachShader(program, vertex_shader);
- glDeleteShader(vertex_shader);
-
- GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
- const char* ps_source = kFragmentShader;
- int ps_size = sizeof(kFragmentShader);
- glShaderSource(fragment_shader, 1, &ps_source, &ps_size);
- glCompileShader(fragment_shader);
- result = GL_FALSE;
- glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &result);
- if (!result) {
- char log[kErrorSize];
- int len = 0;
- glGetShaderInfoLog(fragment_shader, kErrorSize - 1, &len, log);
- log[kErrorSize - 1] = 0;
- LOG(FATAL) << log;
- }
- glAttachShader(program, fragment_shader);
- glDeleteShader(fragment_shader);
-
- glLinkProgram(program);
- result = GL_FALSE;
- glGetProgramiv(program, GL_LINK_STATUS, &result);
- if (!result) {
- char log[kErrorSize];
- int len = 0;
- glGetProgramInfoLog(program, kErrorSize - 1, &len, log);
- log[kErrorSize - 1] = 0;
- LOG(FATAL) << log;
- }
- glUseProgram(program);
- glDeleteProgram(program);
-
- // Bind parameters.
- glUniform1i(glGetUniformLocation(program, "y_tex"), 0);
- glUniform1i(glGetUniformLocation(program, "u_tex"), 1);
- glUniform1i(glGetUniformLocation(program, "v_tex"), 2);
- int yuv2rgb_location = glGetUniformLocation(program, "yuv2rgb");
- glUniformMatrix3fv(yuv2rgb_location, 1, GL_TRUE, kYUV2RGB);
-
- int pos_location = glGetAttribLocation(program, "in_pos");
- glEnableVertexAttribArray(pos_location);
- glVertexAttribPointer(pos_location, 2, GL_FLOAT, GL_FALSE, 0, kVertices);
-
- int tc_location = glGetAttribLocation(program, "in_tc");
- glEnableVertexAttribArray(tc_location);
- float verts[8];
- float x0 = static_cast<float>(visible_rect.x()) / coded_size.width();
- float y0 = static_cast<float>(visible_rect.y()) / coded_size.height();
- float x1 = static_cast<float>(visible_rect.right()) / coded_size.width();
- float y1 = static_cast<float>(visible_rect.bottom()) / coded_size.height();
- verts[0] = x0; verts[1] = y0;
- verts[2] = x0; verts[3] = y1;
- verts[4] = x1; verts[5] = y0;
- verts[6] = x1; verts[7] = y1;
- glVertexAttribPointer(tc_location, 2, GL_FLOAT, GL_FALSE, 0, verts);
-
- // We are getting called on a thread. Release the context so that it can be
- // made current on the main thread.
- glXMakeCurrent(display_, 0, NULL);
-}
diff --git a/chromium/media/tools/player_x11/gl_video_renderer.h b/chromium/media/tools/player_x11/gl_video_renderer.h
deleted file mode 100644
index c9f68efdeca..00000000000
--- a/chromium/media/tools/player_x11/gl_video_renderer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_PLAYER_X11_GL_VIDEO_RENDERER_H_
-#define MEDIA_TOOLS_PLAYER_X11_GL_VIDEO_RENDERER_H_
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-#include "ui/gl/gl_bindings.h"
-
-namespace media {
-class VideoFrame;
-}
-
-class GlVideoRenderer : public base::RefCountedThreadSafe<GlVideoRenderer> {
- public:
- GlVideoRenderer(Display* display, Window window);
-
- void Paint(const scoped_refptr<media::VideoFrame>& video_frame);
-
- private:
- friend class base::RefCountedThreadSafe<GlVideoRenderer>;
- ~GlVideoRenderer();
-
- // Initializes GL rendering for the given dimensions.
- void Initialize(gfx::Size coded_size, gfx::Rect visible_rect);
-
- Display* display_;
- Window window_;
-
- // GL context.
- GLXContext gl_context_;
-
- // 3 textures, one for each plane.
- GLuint textures_[3];
-
- DISALLOW_COPY_AND_ASSIGN(GlVideoRenderer);
-};
-
-#endif // MEDIA_TOOLS_PLAYER_X11_GL_VIDEO_RENDERER_H_
diff --git a/chromium/media/tools/player_x11/player_x11.cc b/chromium/media/tools/player_x11/player_x11.cc
deleted file mode 100644
index ded465cf137..00000000000
--- a/chromium/media/tools/player_x11/player_x11.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <signal.h>
-
-#include <iostream> // NOLINT
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/files/file_path.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/scoped_vector.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/null_audio_sink.h"
-#include "media/base/audio_hardware_config.h"
-#include "media/base/bind_to_current_loop.h"
-#include "media/base/decryptor.h"
-#include "media/base/media.h"
-#include "media/base/media_log.h"
-#include "media/base/media_switches.h"
-#include "media/base/pipeline.h"
-#include "media/base/text_track.h"
-#include "media/base/text_track_config.h"
-#include "media/base/video_frame.h"
-#include "media/filters/audio_renderer_impl.h"
-#include "media/filters/ffmpeg_audio_decoder.h"
-#include "media/filters/ffmpeg_demuxer.h"
-#include "media/filters/ffmpeg_video_decoder.h"
-#include "media/filters/file_data_source.h"
-#include "media/filters/renderer_impl.h"
-#include "media/filters/video_renderer_impl.h"
-#include "media/tools/player_x11/data_source_logger.h"
-
-// Include X11 headers here because X11/Xlib.h #define's Status
-// which causes compiler errors with Status enum declarations
-// in media::DemuxerStream & media::AudioDecoder.
-#include <X11/XKBlib.h>
-#include <X11/Xlib.h>
-
-#include "media/tools/player_x11/gl_video_renderer.h"
-#include "media/tools/player_x11/x11_video_renderer.h"
-
-static Display* g_display = NULL;
-static Window g_window = 0;
-static bool g_running = false;
-
-media::AudioManager* g_audio_manager = NULL;
-
-scoped_ptr<media::DataSource> CreateDataSource(const std::string& file_path) {
- media::FileDataSource* file_data_source = new media::FileDataSource();
- CHECK(file_data_source->Initialize(base::FilePath(file_path)));
-
- scoped_ptr<media::DataSource> data_source(file_data_source);
- return data_source.Pass();
-}
-
-// Initialize X11. Returns true if successful. This method creates the X11
-// window. Further initialization is done in X11VideoRenderer.
-bool InitX11() {
- g_display = XOpenDisplay(NULL);
- if (!g_display) {
- std::cout << "Error - cannot open display" << std::endl;
- return false;
- }
-
- // Get properties of the screen.
- int screen = DefaultScreen(g_display);
- int root_window = RootWindow(g_display, screen);
-
- // Creates the window.
- g_window = XCreateSimpleWindow(g_display, root_window, 1, 1, 100, 50, 0,
- BlackPixel(g_display, screen),
- BlackPixel(g_display, screen));
- XStoreName(g_display, g_window, "X11 Media Player");
-
- XSelectInput(g_display, g_window,
- ExposureMask | ButtonPressMask | KeyPressMask);
- XMapWindow(g_display, g_window);
- return true;
-}
-
-static void DoNothing() {}
-
-static void OnStatus(media::PipelineStatus status) {}
-
-static void OnMetadata(media::PipelineMetadata metadata) {}
-
-static void OnBufferingStateChanged(media::BufferingState buffering_state) {}
-
-static void OnAddTextTrack(const media::TextTrackConfig& config,
- const media::AddTextTrackDoneCB& done_cb) {
-}
-
-static void NeedKey(const std::string& type,
- const std::vector<uint8>& init_data) {
- std::cout << "File is encrypted." << std::endl;
-}
-
-static void SaveStatusAndSignal(base::WaitableEvent* event,
- media::PipelineStatus* status_out,
- media::PipelineStatus status) {
- *status_out = status;
- event->Signal();
-}
-
-// TODO(vrk): Re-enabled audio. (crbug.com/112159)
-void InitPipeline(
- media::Pipeline* pipeline,
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- media::Demuxer* demuxer,
- const media::VideoRendererImpl::PaintCB& paint_cb,
- bool /* enable_audio */) {
- ScopedVector<media::VideoDecoder> video_decoders;
- video_decoders.push_back(new media::FFmpegVideoDecoder(task_runner));
- scoped_ptr<media::VideoRenderer> video_renderer(
- new media::VideoRendererImpl(task_runner,
- video_decoders.Pass(),
- media::SetDecryptorReadyCB(),
- paint_cb,
- true,
- new media::MediaLog()));
-
- ScopedVector<media::AudioDecoder> audio_decoders;
- audio_decoders.push_back(new media::FFmpegAudioDecoder(task_runner,
- media::LogCB()));
- media::AudioParameters out_params(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_STEREO,
- 44100,
- 16,
- 512);
- media::AudioHardwareConfig hardware_config(out_params, out_params);
-
- scoped_ptr<media::AudioRenderer> audio_renderer(
- new media::AudioRendererImpl(task_runner,
- new media::NullAudioSink(task_runner),
- audio_decoders.Pass(),
- media::SetDecryptorReadyCB(),
- hardware_config,
- new media::MediaLog()));
-
- scoped_ptr<media::Renderer> renderer(new media::RendererImpl(
- task_runner, audio_renderer.Pass(), video_renderer.Pass()));
-
- base::WaitableEvent event(true, false);
- media::PipelineStatus status;
-
- pipeline->Start(demuxer,
- renderer.Pass(),
- base::Bind(&DoNothing),
- base::Bind(&OnStatus),
- base::Bind(&SaveStatusAndSignal, &event, &status),
- base::Bind(&OnMetadata),
- base::Bind(&OnBufferingStateChanged),
- base::Bind(&DoNothing),
- base::Bind(&OnAddTextTrack));
-
- // Wait until the pipeline is fully initialized.
- event.Wait();
- CHECK_EQ(status, media::PIPELINE_OK) << "Pipeline initialization failed";
-
- // And start the playback.
- pipeline->SetPlaybackRate(1.0f);
-}
-
-void TerminateHandler(int signal) {
- g_running = false;
-}
-
-void PeriodicalUpdate(
- media::Pipeline* pipeline,
- base::MessageLoop* message_loop) {
- if (!g_running) {
- // interrupt signal was received during last time period.
- // Quit message_loop only when pipeline is fully stopped.
- pipeline->Stop(base::MessageLoop::QuitClosure());
- return;
- }
-
- // Consume all the X events
- while (XPending(g_display)) {
- XEvent e;
- XNextEvent(g_display, &e);
- switch (e.type) {
- case ButtonPress:
- {
- Window window;
- int x, y;
- unsigned int width, height, border_width, depth;
- XGetGeometry(g_display,
- g_window,
- &window,
- &x,
- &y,
- &width,
- &height,
- &border_width,
- &depth);
- base::TimeDelta time = pipeline->GetMediaDuration();
- pipeline->Seek(time*e.xbutton.x/width, base::Bind(&OnStatus));
- }
- break;
- case KeyPress:
- {
- KeySym key = XkbKeycodeToKeysym(g_display, e.xkey.keycode, 0, 0);
- if (key == XK_Escape) {
- g_running = false;
- // Quit message_loop only when pipeline is fully stopped.
- pipeline->Stop(base::MessageLoop::QuitClosure());
- return;
- } else if (key == XK_space) {
- if (pipeline->GetPlaybackRate() < 0.01f) // paused
- pipeline->SetPlaybackRate(1.0f);
- else
- pipeline->SetPlaybackRate(0.0f);
- }
- }
- break;
- default:
- break;
- }
- }
-
- message_loop->PostDelayedTask(
- FROM_HERE,
- base::Bind(&PeriodicalUpdate,
- base::Unretained(pipeline),
- message_loop),
- base::TimeDelta::FromMilliseconds(10));
-}
-
-int main(int argc, char** argv) {
- base::AtExitManager at_exit;
- media::InitializeMediaLibraryForTesting();
-
- CommandLine::Init(argc, argv);
- CommandLine* command_line = CommandLine::ForCurrentProcess();
- std::string filename = command_line->GetSwitchValueASCII("file");
-
- if (filename.empty()) {
- std::cout << "Usage: " << argv[0] << " --file=FILE" << std::endl
- << std::endl
- << "Optional arguments:" << std::endl
- << " [--audio]"
- << " [--alsa-device=DEVICE]"
- << " [--use-gl]"
- << " [--streaming]" << std::endl
- << " Press [ESC] to stop" << std::endl
- << " Press [SPACE] to toggle pause/play" << std::endl
- << " Press mouse left button to seek" << std::endl;
- return 1;
- }
-
- scoped_ptr<media::AudioManager> audio_manager(
- media::AudioManager::CreateForTesting());
- g_audio_manager = audio_manager.get();
-
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- logging::InitLogging(settings);
-
- // Install the signal handler.
- signal(SIGTERM, &TerminateHandler);
- signal(SIGINT, &TerminateHandler);
-
- // Initialize X11.
- if (!InitX11())
- return 1;
-
- // Initialize the pipeline thread and the pipeline.
- base::MessageLoop message_loop;
- base::Thread media_thread("MediaThread");
- media_thread.Start();
-
- media::VideoRendererImpl::PaintCB paint_cb;
- if (command_line->HasSwitch("use-gl")) {
- paint_cb = media::BindToCurrentLoop(base::Bind(
- &GlVideoRenderer::Paint, new GlVideoRenderer(g_display, g_window)));
- } else {
- paint_cb = media::BindToCurrentLoop(base::Bind(
- &X11VideoRenderer::Paint, new X11VideoRenderer(g_display, g_window)));
- }
-
- scoped_ptr<media::DataSource> data_source(new DataSourceLogger(
- CreateDataSource(filename), command_line->HasSwitch("streaming")));
- scoped_ptr<media::Demuxer> demuxer(new media::FFmpegDemuxer(
- media_thread.message_loop_proxy(), data_source.get(),
- base::Bind(&NeedKey), new media::MediaLog()));
-
- media::Pipeline pipeline(media_thread.message_loop_proxy(),
- new media::MediaLog());
- InitPipeline(&pipeline, media_thread.message_loop_proxy(), demuxer.get(),
- paint_cb, command_line->HasSwitch("audio"));
-
- // Main loop of the application.
- g_running = true;
-
- message_loop.PostTask(FROM_HERE, base::Bind(
- &PeriodicalUpdate, base::Unretained(&pipeline), &message_loop));
- message_loop.Run();
-
- // Cleanup tasks.
- media_thread.Stop();
-
- // Release callback which releases video renderer. Do this before cleaning up
- // X below since the video renderer has some X cleanup duties as well.
- paint_cb.Reset();
-
- XDestroyWindow(g_display, g_window);
- XCloseDisplay(g_display);
- g_audio_manager = NULL;
-
- return 0;
-}
diff --git a/chromium/media/tools/player_x11/x11_video_renderer.cc b/chromium/media/tools/player_x11/x11_video_renderer.cc
deleted file mode 100644
index 2ae8e3b3a7f..00000000000
--- a/chromium/media/tools/player_x11/x11_video_renderer.cc
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/player_x11/x11_video_renderer.h"
-
-#include <dlfcn.h>
-#include <X11/Xutil.h>
-#include <X11/extensions/Xrender.h>
-#include <X11/extensions/Xcomposite.h>
-
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/video_frame.h"
-#include "media/base/yuv_convert.h"
-
-// Creates a 32-bit XImage.
-static XImage* CreateImage(Display* display, int width, int height) {
- VLOG(0) << "Allocating XImage " << width << "x" << height;
- return XCreateImage(display,
- DefaultVisual(display, DefaultScreen(display)),
- DefaultDepth(display, DefaultScreen(display)),
- ZPixmap,
- 0,
- static_cast<char*>(malloc(width * height * 4)),
- width,
- height,
- 32,
- width * 4);
-}
-
-// Returns the picture format for ARGB.
-// This method is originally from chrome/common/x11_util.cc.
-static XRenderPictFormat* GetRenderARGB32Format(Display* dpy) {
- static XRenderPictFormat* pictformat = NULL;
- if (pictformat)
- return pictformat;
-
- // First look for a 32-bit format which ignores the alpha value.
- XRenderPictFormat templ;
- templ.depth = 32;
- templ.type = PictTypeDirect;
- templ.direct.red = 16;
- templ.direct.green = 8;
- templ.direct.blue = 0;
- templ.direct.redMask = 0xff;
- templ.direct.greenMask = 0xff;
- templ.direct.blueMask = 0xff;
- templ.direct.alphaMask = 0;
-
- static const unsigned long kMask =
- PictFormatType | PictFormatDepth |
- PictFormatRed | PictFormatRedMask |
- PictFormatGreen | PictFormatGreenMask |
- PictFormatBlue | PictFormatBlueMask |
- PictFormatAlphaMask;
-
- pictformat = XRenderFindFormat(dpy, kMask, &templ, 0 /* first result */);
-
- if (!pictformat) {
- // Not all X servers support xRGB32 formats. However, the XRender spec
- // says that they must support an ARGB32 format, so we can always return
- // that.
- pictformat = XRenderFindStandardFormat(dpy, PictStandardARGB32);
- CHECK(pictformat) << "XRender ARGB32 not supported.";
- }
-
- return pictformat;
-}
-
-X11VideoRenderer::X11VideoRenderer(Display* display, Window window)
- : display_(display),
- window_(window),
- image_(NULL),
- picture_(0),
- use_render_(false) {
-}
-
-X11VideoRenderer::~X11VideoRenderer() {
- if (image_)
- XDestroyImage(image_);
- if (use_render_)
- XRenderFreePicture(display_, picture_);
-}
-
-void X11VideoRenderer::Paint(
- const scoped_refptr<media::VideoFrame>& video_frame) {
- if (!image_)
- Initialize(video_frame->coded_size(), video_frame->visible_rect());
-
- const int coded_width = video_frame->coded_size().width();
- const int coded_height = video_frame->coded_size().height();
- const int visible_width = video_frame->visible_rect().width();
- const int visible_height = video_frame->visible_rect().height();
-
- // Check if we need to reallocate our XImage.
- if (image_->width != coded_width || image_->height != coded_height) {
- XDestroyImage(image_);
- image_ = CreateImage(display_, coded_width, coded_height);
- }
-
- // Convert YUV frame to RGB.
- DCHECK(video_frame->format() == media::VideoFrame::YV12 ||
- video_frame->format() == media::VideoFrame::I420 ||
- video_frame->format() == media::VideoFrame::YV16);
- DCHECK(video_frame->stride(media::VideoFrame::kUPlane) ==
- video_frame->stride(media::VideoFrame::kVPlane));
-
- DCHECK(image_->data);
- media::YUVType yuv_type = (video_frame->format() == media::VideoFrame::YV12 ||
- video_frame->format() == media::VideoFrame::I420)
- ? media::YV12
- : media::YV16;
- media::ConvertYUVToRGB32(video_frame->data(media::VideoFrame::kYPlane),
- video_frame->data(media::VideoFrame::kUPlane),
- video_frame->data(media::VideoFrame::kVPlane),
- (uint8*)image_->data, coded_width, coded_height,
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- image_->bytes_per_line,
- yuv_type);
-
- if (use_render_) {
- // If XRender is used, we'll upload the image to a pixmap. And then
- // creats a picture from the pixmap and composite the picture over
- // the picture represending the window.
-
- // Creates a XImage.
- XImage image;
- memset(&image, 0, sizeof(image));
- image.width = coded_width;
- image.height = coded_height;
- image.depth = 32;
- image.bits_per_pixel = 32;
- image.format = ZPixmap;
- image.byte_order = LSBFirst;
- image.bitmap_unit = 8;
- image.bitmap_bit_order = LSBFirst;
- image.bytes_per_line = image_->bytes_per_line;
- image.red_mask = 0xff;
- image.green_mask = 0xff00;
- image.blue_mask = 0xff0000;
- image.data = image_->data;
-
- // Creates a pixmap and uploads from the XImage.
- unsigned long pixmap = XCreatePixmap(display_, window_,
- visible_width, visible_height,
- 32);
- GC gc = XCreateGC(display_, pixmap, 0, NULL);
- XPutImage(display_, pixmap, gc, &image,
- video_frame->visible_rect().x(),
- video_frame->visible_rect().y(),
- 0, 0,
- visible_width, visible_height);
- XFreeGC(display_, gc);
-
- // Creates the picture representing the pixmap.
- unsigned long picture = XRenderCreatePicture(
- display_, pixmap, GetRenderARGB32Format(display_), 0, NULL);
-
- // Composite the picture over the picture representing the window.
- XRenderComposite(display_, PictOpSrc, picture, 0,
- picture_, 0, 0, 0, 0, 0, 0,
- visible_width, visible_height);
-
- XRenderFreePicture(display_, picture);
- XFreePixmap(display_, pixmap);
- return;
- }
-
- // If XRender is not used, simply put the image to the server.
- // This will have a tearing effect but this is OK.
- // TODO(hclam): Upload the image to a pixmap and do XCopyArea()
- // to the window.
- GC gc = XCreateGC(display_, window_, 0, NULL);
- XPutImage(display_, window_, gc, image_,
- video_frame->visible_rect().x(),
- video_frame->visible_rect().y(),
- 0, 0, visible_width, visible_height);
- XFlush(display_);
- XFreeGC(display_, gc);
-}
-
-void X11VideoRenderer::Initialize(gfx::Size coded_size,
- gfx::Rect visible_rect) {
- CHECK(!image_);
- VLOG(0) << "Initializing X11 Renderer...";
-
- // Resize the window to fit that of the video.
- XResizeWindow(display_, window_, visible_rect.width(), visible_rect.height());
- image_ = CreateImage(display_, coded_size.width(), coded_size.height());
-
- // Testing XRender support. We'll use the very basic of XRender
- // so if it presents it is already good enough. We don't need
- // to check its version.
- int dummy;
- use_render_ = XRenderQueryExtension(display_, &dummy, &dummy);
-
- if (use_render_) {
- VLOG(0) << "Using XRender extension.";
-
- // If we are using XRender, we'll create a picture representing the
- // window.
- XWindowAttributes attr;
- XGetWindowAttributes(display_, window_, &attr);
-
- XRenderPictFormat* pictformat = XRenderFindVisualFormat(
- display_,
- attr.visual);
- CHECK(pictformat) << "XRender does not support default visual";
-
- picture_ = XRenderCreatePicture(display_, window_, pictformat, 0, NULL);
- CHECK(picture_) << "Backing picture not created";
- }
-}
diff --git a/chromium/media/tools/player_x11/x11_video_renderer.h b/chromium/media/tools/player_x11/x11_video_renderer.h
deleted file mode 100644
index d6c093868c3..00000000000
--- a/chromium/media/tools/player_x11/x11_video_renderer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_PLAYER_X11_X11_VIDEO_RENDERER_H_
-#define MEDIA_TOOLS_PLAYER_X11_X11_VIDEO_RENDERER_H_
-
-#include <X11/Xlib.h>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-class VideoFrame;
-}
-
-class X11VideoRenderer : public base::RefCountedThreadSafe<X11VideoRenderer> {
- public:
- X11VideoRenderer(Display* display, Window window);
-
- void Paint(const scoped_refptr<media::VideoFrame>& video_frame);
-
- private:
- friend class base::RefCountedThreadSafe<X11VideoRenderer>;
- ~X11VideoRenderer();
-
- // Initializes X11 rendering for the given dimensions.
- void Initialize(gfx::Size coded_size, gfx::Rect visible_rect);
-
- Display* display_;
- Window window_;
-
- // Image in heap that contains the RGBA data of the video frame.
- XImage* image_;
-
- // Picture represents the paint target. This is a picture located
- // in the server.
- unsigned long picture_;
-
- bool use_render_;
-
- DISALLOW_COPY_AND_ASSIGN(X11VideoRenderer);
-};
-
-#endif // MEDIA_TOOLS_PLAYER_X11_X11_VIDEO_RENDERER_H_
diff --git a/chromium/media/video/capture/OWNERS b/chromium/media/video/capture/OWNERS
index 4e965f7759c..92972c65cbf 100644
--- a/chromium/media/video/capture/OWNERS
+++ b/chromium/media/video/capture/OWNERS
@@ -1,2 +1,4 @@
+mcasas@chromium.org
perkj@chromium.org
+posciak@chromium.org
tommi@chromium.org
diff --git a/chromium/media/video/capture/android/OWNERS b/chromium/media/video/capture/android/OWNERS
deleted file mode 100644
index ad809814d3d..00000000000
--- a/chromium/media/video/capture/android/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-mcasas@chromium.org
diff --git a/chromium/media/video/capture/android/imageformat_list.h b/chromium/media/video/capture/android/imageformat_list.h
deleted file mode 100644
index fe8cfb232b6..00000000000
--- a/chromium/media/video/capture/android/imageformat_list.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file intentionally does not have header guards, it's included
-// inside a macro to generate enum and a java class for the values.
-
-#ifndef DEFINE_ANDROID_IMAGEFORMAT
-#error "DEFINE_ANDROID_IMAGEFORMAT should be defined."
-#endif
-
-// Android graphics ImageFormat mapping, see reference in:
-// http://developer.android.com/reference/android/graphics/ImageFormat.html
-
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_NV21, 17)
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_YV12, 842094169)
-
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_UNKNOWN, 0)
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/video/capture/android/video_capture_device_android.cc
index 836153df749..26d7fb3b3e3 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_android.cc
@@ -4,10 +4,8 @@
#include "media/video/capture/android/video_capture_device_android.h"
-#include <string>
#include "base/android/jni_android.h"
-#include "base/android/scoped_java_ref.h"
#include "base/strings/string_number_conversions.h"
#include "jni/VideoCapture_jni.h"
#include "media/video/capture/android/video_capture_device_factory_android.h"
@@ -97,8 +95,8 @@ void VideoCaptureDeviceAndroid::AllocateAndStart(
<< capture_format_.frame_size.ToString()
<< ", frame_rate=" << capture_format_.frame_rate;
- jint result = Java_VideoCapture_startCapture(env, j_capture_.obj());
- if (result < 0) {
+ ret = Java_VideoCapture_startCapture(env, j_capture_.obj());
+ if (!ret) {
SetErrorState("failed to start capture");
return;
}
@@ -119,8 +117,8 @@ void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
JNIEnv* env = AttachCurrentThread();
- jint ret = Java_VideoCapture_stopCapture(env, j_capture_.obj());
- if (ret < 0) {
+ jboolean ret = Java_VideoCapture_stopCapture(env, j_capture_.obj());
+ if (!ret) {
SetErrorState("failed to stop capture");
return;
}
@@ -134,12 +132,11 @@ void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
Java_VideoCapture_deallocate(env, j_capture_.obj());
}
-void VideoCaptureDeviceAndroid::OnFrameAvailable(
- JNIEnv* env,
- jobject obj,
- jbyteArray data,
- jint length,
- jint rotation) {
+void VideoCaptureDeviceAndroid::OnFrameAvailable(JNIEnv* env,
+ jobject obj,
+ jbyteArray data,
+ jint length,
+ jint rotation) {
DVLOG(3) << "VideoCaptureDeviceAndroid::OnFrameAvailable: length =" << length;
base::AutoLock lock(lock_);
@@ -174,6 +171,14 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
}
+void VideoCaptureDeviceAndroid::OnError(JNIEnv* env,
+ jobject obj,
+ jstring message) {
+ const char *native_string = env->GetStringUTFChars(message, JNI_FALSE);
+ SetErrorState(native_string);
+ env->ReleaseStringUTFChars(message, native_string);
+}
+
VideoPixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
JNIEnv* env = AttachCurrentThread();
int current_capture_colorspace =
@@ -181,6 +186,8 @@ VideoPixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
switch (current_capture_colorspace) {
case ANDROID_IMAGE_FORMAT_YV12:
return media::PIXEL_FORMAT_YV12;
+ case ANDROID_IMAGE_FORMAT_YUV_420_888:
+ return media::PIXEL_FORMAT_I420;
case ANDROID_IMAGE_FORMAT_NV21:
return media::PIXEL_FORMAT_NV21;
case ANDROID_IMAGE_FORMAT_UNKNOWN:
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/video/capture/android/video_capture_device_android.h
index e4defa1bab4..832d64fbc8b 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_android.h
@@ -31,12 +31,13 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
// Android graphics ImageFormat mapping, see reference in:
// http://developer.android.com/reference/android/graphics/ImageFormat.html
ANDROID_IMAGE_FORMAT_NV21 = 17,
+ ANDROID_IMAGE_FORMAT_YUV_420_888 = 35,
ANDROID_IMAGE_FORMAT_YV12 = 842094169,
ANDROID_IMAGE_FORMAT_UNKNOWN = 0,
};
explicit VideoCaptureDeviceAndroid(const Name& device_name);
- virtual ~VideoCaptureDeviceAndroid();
+ ~VideoCaptureDeviceAndroid() override;
static VideoCaptureDevice* Create(const Name& device_name);
static bool RegisterVideoCaptureDevice(JNIEnv* env);
@@ -47,9 +48,9 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
bool Init();
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<Client> client) override;
- virtual void StopAndDeAllocate() override;
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) override;
+ void StopAndDeAllocate() override;
// Implement org.chromium.media.VideoCapture.nativeOnFrameAvailable.
void OnFrameAvailable(
@@ -59,6 +60,9 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
jint length,
jint rotation);
+ // Implement org.chromium.media.VideoCapture.nativeOnError.
+ void OnError(JNIEnv* env, jobject obj, jstring message);
+
private:
enum InternalState {
kIdle, // The device is opened but not in use.
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.cc b/chromium/media/video/capture/android/video_capture_device_factory_android.cc
index 010c1949eb6..12030705b5d 100644
--- a/chromium/media/video/capture/android/video_capture_device_factory_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_factory_android.cc
@@ -5,7 +5,6 @@
#include "media/video/capture/android/video_capture_device_factory_android.h"
#include "base/android/jni_string.h"
-#include "base/android/scoped_java_ref.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "jni/VideoCaptureFactory_jni.h"
@@ -41,11 +40,11 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryAndroid::Create(
if (!base::StringToInt(device_name.id(), &id))
return scoped_ptr<VideoCaptureDevice>();
- VideoCaptureDeviceAndroid* video_capture_device(
+ scoped_ptr<VideoCaptureDeviceAndroid> video_capture_device(
new VideoCaptureDeviceAndroid(device_name));
if (video_capture_device->Init())
- return scoped_ptr<VideoCaptureDevice>(video_capture_device);
+ return video_capture_device.Pass();
DLOG(ERROR) << "Error creating Video Capture Device.";
return scoped_ptr<VideoCaptureDevice>();
@@ -58,21 +57,27 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceNames(
JNIEnv* env = AttachCurrentThread();
- int num_cameras = Java_VideoCaptureFactory_getNumberOfCameras(
- env, base::android::GetApplicationContext());
+ const jobject context = base::android::GetApplicationContext();
+ const int num_cameras = Java_VideoCaptureFactory_getNumberOfCameras(env,
+ context);
DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: num_cameras=" << num_cameras;
if (num_cameras <= 0)
return;
for (int camera_id = num_cameras - 1; camera_id >= 0; --camera_id) {
base::android::ScopedJavaLocalRef<jstring> device_name =
- Java_VideoCaptureFactory_getDeviceName(env, camera_id);
+ Java_VideoCaptureFactory_getDeviceName(env, camera_id, context);
if (device_name.obj() == NULL)
continue;
+ const int capture_api_type =
+ Java_VideoCaptureFactory_getCaptureApiType(env, camera_id, context);
+
VideoCaptureDevice::Name name(
base::android::ConvertJavaStringToUTF8(device_name),
- base::IntToString(camera_id));
+ base::IntToString(camera_id),
+ static_cast<VideoCaptureDevice::Name::CaptureApiType>(
+ capture_api_type));
device_names->push_back(name);
DVLOG(1) << "VideoCaptureDeviceFactoryAndroid::GetDeviceNames: camera "
@@ -89,7 +94,8 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
return;
JNIEnv* env = AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobjectArray> collected_formats =
- Java_VideoCaptureFactory_getDeviceSupportedFormats(env, id);
+ Java_VideoCaptureFactory_getDeviceSupportedFormats(env,
+ base::android::GetApplicationContext(), id);
if (collected_formats.is_null())
return;
@@ -123,4 +129,11 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
}
}
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryAndroid();
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.h b/chromium/media/video/capture/android/video_capture_device_factory_android.h
index 4ea4ad50f5d..994139390fe 100644
--- a/chromium/media/video/capture/android/video_capture_device_factory_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_factory_android.h
@@ -25,12 +25,12 @@ class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid :
jlong nativeVideoCaptureDeviceAndroid);
VideoCaptureDeviceFactoryAndroid() {}
- virtual ~VideoCaptureDeviceFactoryAndroid() {}
+ ~VideoCaptureDeviceFactoryAndroid() override {}
- virtual scoped_ptr<VideoCaptureDevice> Create(
+ scoped_ptr<VideoCaptureDevice> Create(
const VideoCaptureDevice::Name& device_name) override;
- virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
- virtual void GetDeviceSupportedFormats(
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
const VideoCaptureDevice::Name& device,
VideoCaptureFormats* supported_formats) override;
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/video/capture/fake_video_capture_device.cc
index 6f4fd75174a..12a8a305452 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/video/capture/fake_video_capture_device.cc
@@ -4,10 +4,8 @@
#include "media/video/capture/fake_video_capture_device.h"
-#include <string>
#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/audio/fake_audio_input_stream.h"
#include "media/base/video_frame.h"
@@ -18,55 +16,72 @@
namespace media {
static const int kFakeCaptureBeepCycle = 10; // Visual beep every 0.5s.
-static const int kFakeCaptureCapabilityChangePeriod = 30;
-FakeVideoCaptureDevice::FakeVideoCaptureDevice()
- : capture_thread_("CaptureThread"),
+void DrawPacman(bool use_argb,
+ uint8_t* const data,
+ int frame_count,
+ int frame_interval,
+ const gfx::Size& frame_size) {
+ // |kN32_SkColorType| stands for the appropriiate RGBA/BGRA format.
+ const SkColorType colorspace =
+ use_argb ? kN32_SkColorType : kAlpha_8_SkColorType;
+ const SkImageInfo info = SkImageInfo::Make(frame_size.width(),
+ frame_size.height(),
+ colorspace,
+ kOpaque_SkAlphaType);
+ SkBitmap bitmap;
+ bitmap.setInfo(info);
+ bitmap.setPixels(data);
+ SkPaint paint;
+ paint.setStyle(SkPaint::kFill_Style);
+ SkCanvas canvas(bitmap);
+
+ // Equalize Alpha_8 that has light green background while RGBA has white.
+ if (use_argb) {
+ const SkRect full_frame = SkRect::MakeWH(frame_size.width(),
+ frame_size.height());
+ paint.setARGB(255, 0, 127, 0);
+ canvas.drawRect(full_frame, paint);
+ }
+ paint.setColor(SK_ColorGREEN);
+
+ // Draw a sweeping circle to show an animation.
+ const int end_angle = (3 * kFakeCaptureBeepCycle * frame_count % 361);
+ const int radius = std::min(frame_size.width(), frame_size.height()) / 4;
+ const SkRect rect = SkRect::MakeXYWH(frame_size.width() / 2 - radius,
+ frame_size.height() / 2 - radius,
+ 2 * radius,
+ 2 * radius);
+ canvas.drawArc(rect, 0, end_angle, true, paint);
+
+ // Draw current time.
+ const int elapsed_ms = frame_interval * frame_count;
+ const int milliseconds = elapsed_ms % 1000;
+ const int seconds = (elapsed_ms / 1000) % 60;
+ const int minutes = (elapsed_ms / 1000 / 60) % 60;
+ const int hours = (elapsed_ms / 1000 / 60 / 60) % 60;
+
+ const std::string time_string = base::StringPrintf("%d:%02d:%02d:%03d %d",
+ hours, minutes, seconds, milliseconds, frame_count);
+ canvas.scale(3, 3);
+ canvas.drawText(time_string.data(), time_string.length(), 30, 20, paint);
+}
+
+FakeVideoCaptureDevice::FakeVideoCaptureDevice(
+ FakeVideoCaptureDeviceType device_type)
+ : device_type_(device_type),
frame_count_(0),
- format_roster_index_(0) {}
+ weak_factory_(this) {}
FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!capture_thread_.IsRunning());
}
void FakeVideoCaptureDevice::AllocateAndStart(
const VideoCaptureParams& params,
scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!capture_thread_.IsRunning());
-
- capture_thread_.Start();
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FakeVideoCaptureDevice::OnAllocateAndStart,
- base::Unretained(this),
- params,
- base::Passed(&client)));
-}
-void FakeVideoCaptureDevice::StopAndDeAllocate() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(capture_thread_.IsRunning());
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FakeVideoCaptureDevice::OnStopAndDeAllocate,
- base::Unretained(this)));
- capture_thread_.Stop();
-}
-
-void FakeVideoCaptureDevice::PopulateVariableFormatsRoster(
- const VideoCaptureFormats& formats) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!capture_thread_.IsRunning());
- format_roster_ = formats;
- format_roster_index_ = 0;
-}
-
-void FakeVideoCaptureDevice::OnAllocateAndStart(
- const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
client_ = client.Pass();
// Incoming |params| can be none of the supported formats, so we get the
@@ -74,115 +89,121 @@ void FakeVideoCaptureDevice::OnAllocateAndStart(
// the supported ones, when http://crbug.com/309554 is verified.
DCHECK_EQ(params.requested_format.pixel_format, PIXEL_FORMAT_I420);
capture_format_.pixel_format = params.requested_format.pixel_format;
- capture_format_.frame_rate = 30;
- if (params.requested_format.frame_size.width() > 640)
+ capture_format_.frame_rate = 30.0;
+ if (params.requested_format.frame_size.width() > 1280)
+ capture_format_.frame_size.SetSize(1920, 1080);
+ else if (params.requested_format.frame_size.width() > 640)
capture_format_.frame_size.SetSize(1280, 720);
else if (params.requested_format.frame_size.width() > 320)
capture_format_.frame_size.SetSize(640, 480);
else
capture_format_.frame_size.SetSize(320, 240);
- const size_t fake_frame_size =
- VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
- fake_frame_.reset(new uint8[fake_frame_size]);
-
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
- base::Unretained(this)));
+
+ if (device_type_ == USING_OWN_BUFFERS ||
+ device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ fake_frame_.reset(new uint8[VideoFrame::AllocationSize(
+ VideoFrame::I420, capture_format_.frame_size)]);
+ BeepAndScheduleNextCapture(
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ weak_factory_.GetWeakPtr()));
+ } else if (device_type_ == USING_CLIENT_BUFFERS_I420 ||
+ device_type_ == USING_CLIENT_BUFFERS_GPU) {
+ DVLOG(1) << "starting with " << (device_type_ == USING_CLIENT_BUFFERS_I420
+ ? "Client buffers"
+ : "GpuMemoryBuffers");
+ BeepAndScheduleNextCapture(base::Bind(
+ &FakeVideoCaptureDevice::CaptureUsingClientBuffers,
+ weak_factory_.GetWeakPtr(), (device_type_ == USING_CLIENT_BUFFERS_I420
+ ? PIXEL_FORMAT_I420
+ : PIXEL_FORMAT_GPUMEMORYBUFFER)));
+ } else {
+ client_->OnError("Unknown Fake Video Capture Device type.");
+ }
}
-void FakeVideoCaptureDevice::OnStopAndDeAllocate() {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+void FakeVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
client_.reset();
}
-void FakeVideoCaptureDevice::OnCaptureTask() {
- if (!client_)
- return;
-
- const size_t frame_size =
- VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
+void FakeVideoCaptureDevice::CaptureUsingOwnBuffers() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const size_t frame_size = capture_format_.ImageAllocationSize();
memset(fake_frame_.get(), 0, frame_size);
- SkImageInfo info = SkImageInfo::MakeA8(capture_format_.frame_size.width(),
- capture_format_.frame_size.height());
- SkBitmap bitmap;
- bitmap.installPixels(info, fake_frame_.get(), info.width());
- SkCanvas canvas(bitmap);
+ DrawPacman(false /* use_argb */,
+ fake_frame_.get(),
+ frame_count_,
+ kFakeCapturePeriodMs,
+ capture_format_.frame_size);
- // Draw a sweeping circle to show an animation.
- int radius = std::min(capture_format_.frame_size.width(),
- capture_format_.frame_size.height()) / 4;
- SkRect rect =
- SkRect::MakeXYWH(capture_format_.frame_size.width() / 2 - radius,
- capture_format_.frame_size.height() / 2 - radius,
- 2 * radius,
- 2 * radius);
-
- SkPaint paint;
- paint.setStyle(SkPaint::kFill_Style);
+ // Give the captured frame to the client.
+ if (device_type_ == USING_OWN_BUFFERS) {
+ client_->OnIncomingCapturedData(fake_frame_.get(),
+ frame_size,
+ capture_format_,
+ 0 /* rotation */,
+ base::TimeTicks::Now());
+ } else if (device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ client_->OnIncomingCapturedYuvData(
+ fake_frame_.get(),
+ fake_frame_.get() + capture_format_.frame_size.GetArea(),
+ fake_frame_.get() + capture_format_.frame_size.GetArea() * 5 / 4,
+ capture_format_.frame_size.width(),
+ capture_format_.frame_size.width() / 2,
+ capture_format_.frame_size.width() / 2,
+ capture_format_,
+ 0 /* rotation */,
+ base::TimeTicks::Now());
+ }
+ BeepAndScheduleNextCapture(
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ weak_factory_.GetWeakPtr()));
+}
- // Only Y plane is being drawn and this gives 50% grey on the Y
- // plane. The result is a light green color in RGB space.
- paint.setAlpha(128);
+void FakeVideoCaptureDevice::CaptureUsingClientBuffers(
+ VideoPixelFormat pixel_format) {
+ DCHECK(thread_checker_.CalledOnValidThread());
- int end_angle = (frame_count_ % kFakeCaptureBeepCycle * 360) /
- kFakeCaptureBeepCycle;
- if (!end_angle)
- end_angle = 360;
- canvas.drawArc(rect, 0, end_angle, true, paint);
+ scoped_ptr<VideoCaptureDevice::Client::Buffer> capture_buffer(
+ client_->ReserveOutputBuffer(pixel_format, capture_format_.frame_size));
+ DLOG_IF(ERROR, !capture_buffer) << "Couldn't allocate Capture Buffer";
+
+ if (capture_buffer.get()) {
+ uint8_t* const data_ptr = static_cast<uint8_t*>(capture_buffer->data());
+ DCHECK(data_ptr) << "Buffer has NO backing memory";
+ DCHECK_EQ(capture_buffer->GetType(), gfx::SHARED_MEMORY_BUFFER);
+ memset(data_ptr, 0, capture_buffer->size());
+
+ DrawPacman(
+ (pixel_format == media::PIXEL_FORMAT_GPUMEMORYBUFFER), /* use_argb */
+ data_ptr,
+ frame_count_,
+ kFakeCapturePeriodMs,
+ capture_format_.frame_size);
+
+ // Give the captured frame to the client.
+ const VideoCaptureFormat format(capture_format_.frame_size,
+ capture_format_.frame_rate,
+ pixel_format);
+ client_->OnIncomingCapturedBuffer(capture_buffer.Pass(), format,
+ base::TimeTicks::Now());
+ }
- // Draw current time.
- int elapsed_ms = kFakeCaptureTimeoutMs * frame_count_;
- int milliseconds = elapsed_ms % 1000;
- int seconds = (elapsed_ms / 1000) % 60;
- int minutes = (elapsed_ms / 1000 / 60) % 60;
- int hours = (elapsed_ms / 1000 / 60 / 60) % 60;
-
- std::string time_string =
- base::StringPrintf("%d:%02d:%02d:%03d %d", hours, minutes,
- seconds, milliseconds, frame_count_);
- canvas.scale(3, 3);
- canvas.drawText(time_string.data(), time_string.length(), 30, 20,
- paint);
+ BeepAndScheduleNextCapture(
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers,
+ weak_factory_.GetWeakPtr(), pixel_format));
+}
- if (frame_count_ % kFakeCaptureBeepCycle == 0) {
- // Generate a synchronized beep sound if there is one audio input
- // stream created.
+void FakeVideoCaptureDevice::BeepAndScheduleNextCapture(
+ const base::Closure& next_capture) {
+ // Generate a synchronized beep sound every so many frames.
+ if (frame_count_++ % kFakeCaptureBeepCycle == 0)
FakeAudioInputStream::BeepOnce();
- }
-
- frame_count_++;
- // Give the captured frame to the client.
- client_->OnIncomingCapturedData(fake_frame_.get(),
- frame_size,
- capture_format_,
- 0,
- base::TimeTicks::Now());
- if (!(frame_count_ % kFakeCaptureCapabilityChangePeriod) &&
- format_roster_.size() > 0U) {
- Reallocate();
- }
// Reschedule next CaptureTask.
- capture_thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
- base::Unretained(this)),
- base::TimeDelta::FromMilliseconds(kFakeCaptureTimeoutMs));
-}
-
-void FakeVideoCaptureDevice::Reallocate() {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- capture_format_ =
- format_roster_.at(++format_roster_index_ % format_roster_.size());
- DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
- DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution "
- << capture_format_.frame_size.ToString();
-
- const size_t fake_frame_size =
- VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
- fake_frame_.reset(new uint8[fake_frame_size]);
+ base::MessageLoop::current()->PostDelayedTask(FROM_HERE, next_capture,
+ base::TimeDelta::FromMilliseconds(kFakeCapturePeriodMs));
}
} // namespace media
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/video/capture/fake_video_capture_device.h
index 30d1b3358f5..f4a19f9edb9 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/video/capture/fake_video_capture_device.h
@@ -12,6 +12,7 @@
#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "media/video/capture/video_capture_device.h"
@@ -20,46 +21,45 @@ namespace media {
class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
public:
- static const int kFakeCaptureTimeoutMs = 50;
+ enum FakeVideoCaptureDeviceType {
+ USING_OWN_BUFFERS,
+ USING_OWN_BUFFERS_TRIPLANAR,
+ USING_CLIENT_BUFFERS_I420,
+ USING_CLIENT_BUFFERS_GPU,
+ };
- FakeVideoCaptureDevice();
+ static int FakeCapturePeriodMs() { return kFakeCapturePeriodMs; }
+
+ explicit FakeVideoCaptureDevice(FakeVideoCaptureDeviceType device_type);
~FakeVideoCaptureDevice() override;
// VideoCaptureDevice implementation.
void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) override;
+ scoped_ptr<Client> client) override;
void StopAndDeAllocate() override;
- // Sets the formats to use sequentially when the device is configured as
- // variable capture resolution. Works only before AllocateAndStart() or
- // after StopAndDeallocate().
- void PopulateVariableFormatsRoster(const VideoCaptureFormats& formats);
-
private:
- // Called on the |capture_thread_| only.
- void OnAllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<Client> client);
- void OnStopAndDeAllocate();
- void OnCaptureTask();
- void Reallocate();
-
- // |thread_checker_| is used to check that destructor, AllocateAndStart() and
- // StopAndDeAllocate() are called in the correct thread that owns the object.
+ static const int kFakeCapturePeriodMs = 50;
+
+ void CaptureUsingOwnBuffers();
+ void CaptureUsingClientBuffers(VideoPixelFormat pixel_format);
+ void BeepAndScheduleNextCapture(const base::Closure& next_capture);
+
+ // |thread_checker_| is used to check that all methods are called in the
+ // correct thread that owns the object.
base::ThreadChecker thread_checker_;
- base::Thread capture_thread_;
- // The following members are only used on the |capture_thread_|.
+ const FakeVideoCaptureDeviceType device_type_;
+
scoped_ptr<VideoCaptureDevice::Client> client_;
+ // |fake_frame_| is used for capturing on Own Buffers.
scoped_ptr<uint8[]> fake_frame_;
int frame_count_;
VideoCaptureFormat capture_format_;
- // When the device is allowed to change resolution, this vector holds the
- // available ones, used sequentially restarting at the end. These two members
- // are initialised in PopulateFormatRoster() before |capture_thread_| is
- // running and are subsequently read-only in that thread.
- std::vector<VideoCaptureFormat> format_roster_;
- int format_roster_index_;
+ // FakeVideoCaptureDevice post tasks to itself for frame construction and
+ // needs to deal with asynchronous StopAndDeallocate().
+ base::WeakPtrFactory<FakeVideoCaptureDevice> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice);
};
diff --git a/chromium/media/video/capture/fake_video_capture_device_factory.cc b/chromium/media/video/capture/fake_video_capture_device_factory.cc
index c8f4706307a..76de761c8fc 100644
--- a/chromium/media/video/capture/fake_video_capture_device_factory.cc
+++ b/chromium/media/video/capture/fake_video_capture_device_factory.cc
@@ -4,7 +4,10 @@
#include "media/video/capture/fake_video_capture_device_factory.h"
+#include "base/command_line.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
+#include "media/base/media_switches.h"
#include "media/video/capture/fake_video_capture_device.h"
namespace media {
@@ -16,10 +19,26 @@ FakeVideoCaptureDeviceFactory::FakeVideoCaptureDeviceFactory()
scoped_ptr<VideoCaptureDevice> FakeVideoCaptureDeviceFactory::Create(
const VideoCaptureDevice::Name& device_name) {
DCHECK(thread_checker_.CalledOnValidThread());
+
+ const std::string option = base::CommandLine::ForCurrentProcess()->
+ GetSwitchValueASCII(switches::kUseFakeDeviceForMediaStream);
+
+ FakeVideoCaptureDevice::FakeVideoCaptureDeviceType fake_vcd_type;
+ if (option.empty())
+ fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS;
+ else if (base:: strcasecmp(option.c_str(), "triplanar") == 0)
+ fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR;
+ else if (base:: strcasecmp(option.c_str(), "gpu") == 0)
+ fake_vcd_type = FakeVideoCaptureDevice::USING_CLIENT_BUFFERS_GPU;
+ else
+ fake_vcd_type = FakeVideoCaptureDevice::USING_CLIENT_BUFFERS_I420;
+
for (int n = 0; n < number_of_devices_; ++n) {
std::string possible_id = base::StringPrintf("/dev/video%d", n);
- if (device_name.id().compare(possible_id) == 0)
- return scoped_ptr<VideoCaptureDevice>(new FakeVideoCaptureDevice());
+ if (device_name.id().compare(possible_id) == 0) {
+ return scoped_ptr<VideoCaptureDevice>(
+ new FakeVideoCaptureDevice(fake_vcd_type));
+ }
}
return scoped_ptr<VideoCaptureDevice>();
}
@@ -31,10 +50,14 @@ void FakeVideoCaptureDeviceFactory::GetDeviceNames(
for (int n = 0; n < number_of_devices_; ++n) {
VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
base::StringPrintf("/dev/video%d", n)
-#if defined(OS_MACOSX)
+#if defined(OS_LINUX)
+ , VideoCaptureDevice::Name::V4L2_SINGLE_PLANE
+#elif defined(OS_MACOSX)
, VideoCaptureDevice::Name::AVFOUNDATION
#elif defined(OS_WIN)
, VideoCaptureDevice::Name::DIRECT_SHOW
+#elif defined(OS_ANDROID)
+ , VideoCaptureDevice::Name::API2_LEGACY
#endif
);
device_names->push_back(name);
@@ -45,15 +68,15 @@ void FakeVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
const VideoCaptureDevice::Name& device,
VideoCaptureFormats* supported_formats) {
DCHECK(thread_checker_.CalledOnValidThread());
- const int frame_rate = 1000 / FakeVideoCaptureDevice::kFakeCaptureTimeoutMs;
+ const int frame_rate = 1000 / FakeVideoCaptureDevice::FakeCapturePeriodMs();
const gfx::Size supported_sizes[] = {gfx::Size(320, 240),
gfx::Size(640, 480),
- gfx::Size(1280, 720)};
+ gfx::Size(1280, 720),
+ gfx::Size(1920, 1080)};
supported_formats->clear();
- for (size_t i = 0; i < arraysize(supported_sizes); ++i) {
- supported_formats->push_back(VideoCaptureFormat(supported_sizes[i],
- frame_rate,
- media::PIXEL_FORMAT_I420));
+ for (const auto& size : supported_sizes) {
+ supported_formats->push_back(
+ VideoCaptureFormat(size, frame_rate, media::PIXEL_FORMAT_I420));
}
}
diff --git a/chromium/media/video/capture/fake_video_capture_device_unittest.cc b/chromium/media/video/capture/fake_video_capture_device_unittest.cc
index 7d1fabedfb4..8890454f366 100644
--- a/chromium/media/video/capture/fake_video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/fake_video_capture_device_unittest.cc
@@ -7,10 +7,10 @@
#include "base/run_loop.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/fake_video_capture_device.h"
#include "media/video/capture/fake_video_capture_device_factory.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -19,38 +19,88 @@ using ::testing::SaveArg;
namespace media {
-class MockClient : public media::VideoCaptureDevice::Client {
- public:
- MOCK_METHOD2(ReserveOutputBuffer,
- scoped_refptr<Buffer>(media::VideoFrame::Format format,
- const gfx::Size& dimensions));
- MOCK_METHOD0(OnErr, void());
+namespace {
- explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
- : main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
+static const FakeVideoCaptureDevice::FakeVideoCaptureDeviceType
+kCaptureTypes[] = {
+ FakeVideoCaptureDevice::USING_OWN_BUFFERS,
+ FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR,
+ FakeVideoCaptureDevice::USING_CLIENT_BUFFERS_I420,
+ FakeVideoCaptureDevice::USING_CLIENT_BUFFERS_GPU,
+};
- virtual void OnError(const std::string& error_message) override {
- OnErr();
+// This class is a Client::Buffer that allocates and frees the requested |size|.
+class MockBuffer : public VideoCaptureDevice::Client::Buffer {
+ public:
+ MockBuffer(int buffer_id, size_t size)
+ : id_(buffer_id), size_(size), data_(new uint8[size_]) {}
+ ~MockBuffer() override { delete[] data_; }
+
+ int id() const override { return id_; }
+ size_t size() const override { return size_; }
+ void* data() override { return data_; }
+ gfx::GpuMemoryBufferType GetType() override {
+ return gfx::SHARED_MEMORY_BUFFER;
}
+ ClientBuffer AsClientBuffer() override { return nullptr; }
+
+ private:
+ const int id_;
+ const size_t size_;
+ uint8* const data_;
+};
- virtual void OnIncomingCapturedData(const uint8* data,
- int length,
- const VideoCaptureFormat& format,
- int rotation,
- base::TimeTicks timestamp) override {
- main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
+class MockClient : public VideoCaptureDevice::Client {
+ public:
+ MOCK_METHOD1(OnError, void(const std::string& reason));
+
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : frame_cb_(frame_cb) {}
+
+ // Client virtual methods for capturing using Device Buffers.
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(format);
+ }
+ void OnIncomingCapturedYuvData(const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(frame_format);
}
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<Buffer>& buffer,
- const media::VideoCaptureFormat& buffer_format,
+ // Virtual methods for capturing using Client's Buffers.
+ scoped_ptr<Buffer> ReserveOutputBuffer(media::VideoPixelFormat format,
+ const gfx::Size& dimensions) {
+ EXPECT_TRUE(format == PIXEL_FORMAT_I420 ||
+ format == PIXEL_FORMAT_GPUMEMORYBUFFER);
+ EXPECT_GT(dimensions.GetArea(), 0);
+ const VideoCaptureFormat frame_format(dimensions, 0.0, format);
+ return make_scoped_ptr(
+ new MockBuffer(0, frame_format.ImageAllocationSize()));
+ }
+ void OnIncomingCapturedBuffer(scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(frame_format);
+ }
+ void OnIncomingCapturedVideoFrame(
+ scoped_ptr<Buffer> buffer,
const scoped_refptr<media::VideoFrame>& frame,
- base::TimeTicks timestamp) override {
- NOTREACHED();
+ const base::TimeTicks& timestamp) {
+ VideoCaptureFormat format(frame->natural_size(), 30.0, PIXEL_FORMAT_I420);
+ frame_cb_.Run(format);
}
private:
- scoped_refptr<base::SingleThreadTaskRunner> main_thread_;
base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
@@ -58,10 +108,10 @@ class DeviceEnumerationListener :
public base::RefCounted<DeviceEnumerationListener> {
public:
MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
- void(media::VideoCaptureDevice::Names* names));
+ void(VideoCaptureDevice::Names* names));
// GMock doesn't support move-only arguments, so we use this forward method.
void OnEnumeratedDevicesCallback(
- scoped_ptr<media::VideoCaptureDevice::Names> names) {
+ scoped_ptr<VideoCaptureDevice::Names> names) {
OnEnumeratedDevicesCallbackPtr(names.release());
}
@@ -70,10 +120,12 @@ class DeviceEnumerationListener :
virtual ~DeviceEnumerationListener() {}
};
-class FakeVideoCaptureDeviceTest : public testing::Test {
- protected:
- typedef media::VideoCaptureDevice::Client Client;
+} // namespace
+class FakeVideoCaptureDeviceTest
+ : public testing::TestWithParam<
+ FakeVideoCaptureDevice::FakeVideoCaptureDeviceType>{
+ protected:
FakeVideoCaptureDeviceTest()
: loop_(new base::MessageLoop()),
client_(new MockClient(
@@ -83,7 +135,9 @@ class FakeVideoCaptureDeviceTest : public testing::Test {
device_enumeration_listener_ = new DeviceEnumerationListener();
}
- void SetUp() override {}
+ void SetUp() override {
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
+ }
void OnFrameCaptured(const VideoCaptureFormat& format) {
last_format_ = format;
@@ -95,8 +149,8 @@ class FakeVideoCaptureDeviceTest : public testing::Test {
run_loop_->Run();
}
- scoped_ptr<media::VideoCaptureDevice::Names> EnumerateDevices() {
- media::VideoCaptureDevice::Names* names;
+ scoped_ptr<VideoCaptureDevice::Names> EnumerateDevices() {
+ VideoCaptureDevice::Names* names;
EXPECT_CALL(*device_enumeration_listener_.get(),
OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
@@ -104,105 +158,70 @@ class FakeVideoCaptureDeviceTest : public testing::Test {
base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
device_enumeration_listener_));
base::MessageLoop::current()->RunUntilIdle();
- return scoped_ptr<media::VideoCaptureDevice::Names>(names);
+ return scoped_ptr<VideoCaptureDevice::Names>(names);
}
const VideoCaptureFormat& last_format() const { return last_format_; }
VideoCaptureDevice::Names names_;
- scoped_ptr<base::MessageLoop> loop_;
+ const scoped_ptr<base::MessageLoop> loop_;
scoped_ptr<base::RunLoop> run_loop_;
scoped_ptr<MockClient> client_;
scoped_refptr<DeviceEnumerationListener> device_enumeration_listener_;
VideoCaptureFormat last_format_;
- scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
+ const scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
};
-TEST_F(FakeVideoCaptureDeviceTest, Capture) {
- scoped_ptr<media::VideoCaptureDevice::Names> names(EnumerateDevices());
-
- ASSERT_GT(static_cast<int>(names->size()), 0);
+TEST_P(FakeVideoCaptureDeviceTest, CaptureUsing) {
+ const scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
+ ASSERT_FALSE(names->empty());
- scoped_ptr<VideoCaptureDevice> device(
- video_capture_device_factory_->Create(names->front()));
+ scoped_ptr<VideoCaptureDevice> device(new FakeVideoCaptureDevice(GetParam()));
ASSERT_TRUE(device);
- EXPECT_CALL(*client_, OnErr()).Times(0);
-
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(640, 480);
capture_params.requested_format.frame_rate = 30;
capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
device->AllocateAndStart(capture_params, client_.Pass());
+
WaitForCapturedFrame();
EXPECT_EQ(last_format().frame_size.width(), 640);
EXPECT_EQ(last_format().frame_size.height(), 480);
- EXPECT_EQ(last_format().frame_rate, 30);
+ EXPECT_EQ(last_format().frame_rate, 30.0);
device->StopAndDeAllocate();
}
+INSTANTIATE_TEST_CASE_P(,
+ FakeVideoCaptureDeviceTest,
+ testing::ValuesIn(kCaptureTypes));
+
TEST_F(FakeVideoCaptureDeviceTest, GetDeviceSupportedFormats) {
scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
VideoCaptureFormats supported_formats;
- VideoCaptureDevice::Names::iterator names_iterator;
- for (names_iterator = names->begin(); names_iterator != names->end();
- ++names_iterator) {
+ for (const auto& names_iterator : *names) {
video_capture_device_factory_->GetDeviceSupportedFormats(
- *names_iterator, &supported_formats);
- EXPECT_EQ(supported_formats.size(), 3u);
+ names_iterator, &supported_formats);
+ ASSERT_EQ(supported_formats.size(), 4u);
EXPECT_EQ(supported_formats[0].frame_size.width(), 320);
EXPECT_EQ(supported_formats[0].frame_size.height(), 240);
- EXPECT_EQ(supported_formats[0].pixel_format, media::PIXEL_FORMAT_I420);
- EXPECT_GE(supported_formats[0].frame_rate, 20);
+ EXPECT_EQ(supported_formats[0].pixel_format, PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[0].frame_rate, 20.0);
EXPECT_EQ(supported_formats[1].frame_size.width(), 640);
EXPECT_EQ(supported_formats[1].frame_size.height(), 480);
- EXPECT_EQ(supported_formats[1].pixel_format, media::PIXEL_FORMAT_I420);
- EXPECT_GE(supported_formats[1].frame_rate, 20);
+ EXPECT_EQ(supported_formats[1].pixel_format, PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[1].frame_rate, 20.0);
EXPECT_EQ(supported_formats[2].frame_size.width(), 1280);
EXPECT_EQ(supported_formats[2].frame_size.height(), 720);
- EXPECT_EQ(supported_formats[2].pixel_format, media::PIXEL_FORMAT_I420);
- EXPECT_GE(supported_formats[2].frame_rate, 20);
+ EXPECT_EQ(supported_formats[2].pixel_format, PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[2].frame_rate, 20.0);
+ EXPECT_EQ(supported_formats[3].frame_size.width(), 1920);
+ EXPECT_EQ(supported_formats[3].frame_size.height(), 1080);
+ EXPECT_EQ(supported_formats[3].pixel_format, PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[3].frame_rate, 20.0);
}
}
-// Disabled, http://crbug.com/407061 .
-TEST_F(FakeVideoCaptureDeviceTest, DISABLED_CaptureVariableResolution) {
- scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
-
- VideoCaptureParams capture_params;
- capture_params.requested_format.frame_size.SetSize(640, 480);
- capture_params.requested_format.frame_rate = 30;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- capture_params.resolution_change_policy =
- RESOLUTION_POLICY_DYNAMIC_WITHIN_LIMIT;
-
- ASSERT_GT(static_cast<int>(names->size()), 0);
-
- scoped_ptr<VideoCaptureDevice> device(
- video_capture_device_factory_->Create(names->front()));
- ASSERT_TRUE(device);
-
- // Configure the FakeVideoCaptureDevice to use all its formats as roster.
- VideoCaptureFormats formats;
- video_capture_device_factory_->GetDeviceSupportedFormats(names->front(),
- &formats);
- static_cast<FakeVideoCaptureDevice*>(device.get())->
- PopulateVariableFormatsRoster(formats);
-
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
- int action_count = 200;
-
- device->AllocateAndStart(capture_params, client_.Pass());
-
- // We set TimeWait to 200 action timeouts and this should be enough for at
- // least action_count/kFakeCaptureCapabilityChangePeriod calls.
- for (int i = 0; i < action_count; ++i) {
- WaitForCapturedFrame();
- }
- device->StopAndDeAllocate();
-}
-
}; // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.cc b/chromium/media/video/capture/file_video_capture_device.cc
index 84a2d156000..5584c0b2093 100644
--- a/chromium/media/video/capture/file_video_capture_device.cc
+++ b/chromium/media/video/capture/file_video_capture_device.cc
@@ -4,12 +4,11 @@
#include "media/video/capture/file_video_capture_device.h"
-#include <string>
#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
+#include "media/base/video_capture_types.h"
namespace media {
static const int kY4MHeaderMaxSize = 200;
@@ -123,7 +122,9 @@ int64 FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(
base::File FileVideoCaptureDevice::OpenFileForRead(
const base::FilePath& file_path) {
base::File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
- CHECK(file.IsValid()) << file_path.value();
+ DLOG_IF(ERROR, file.IsValid())
+ << file_path.value()
+ << ", error: " << base::File::ErrorToString(file.error_details());
return file.Pass();
}
@@ -167,10 +168,10 @@ void FileVideoCaptureDevice::StopAndDeAllocate() {
capture_thread_.Stop();
}
-int FileVideoCaptureDevice::CalculateFrameSize() {
+int FileVideoCaptureDevice::CalculateFrameSize() const {
DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- return capture_format_.frame_size.GetArea() * 12 / 8;
+ return capture_format_.ImageAllocationSize();
}
void FileVideoCaptureDevice::OnAllocateAndStart(
@@ -183,6 +184,10 @@ void FileVideoCaptureDevice::OnAllocateAndStart(
// Open the file and parse the header. Get frame size and format.
DCHECK(!file_.IsValid());
file_ = OpenFileForRead(file_path_);
+ if (!file_.IsValid()) {
+ client_->OnError("Could not open Video file");
+ return;
+ }
first_frame_byte_index_ =
ParseFileAndExtractVideoFormat(&file_, &capture_format_);
current_byte_index_ = first_frame_byte_index_;
@@ -212,6 +217,7 @@ void FileVideoCaptureDevice::OnCaptureTask() {
DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
if (!client_)
return;
+ const base::TimeTicks timestamp_before_reading = base::TimeTicks::Now();
int result = file_.Read(current_byte_index_,
reinterpret_cast<char*>(video_frame_.get()),
frame_size_);
@@ -236,11 +242,19 @@ void FileVideoCaptureDevice::OnCaptureTask() {
0,
base::TimeTicks::Now());
// Reschedule next CaptureTask.
+ const base::TimeDelta frame_interval =
+ base::TimeDelta::FromMicroseconds(1E6 / capture_format_.frame_rate);
+ base::TimeDelta next_on_capture_timedelta = frame_interval -
+ (base::TimeTicks::Now() - timestamp_before_reading);
+ if (next_on_capture_timedelta.InMilliseconds() < 0) {
+ DLOG(WARNING) << "Frame reading took longer than the frame interval.";
+ next_on_capture_timedelta = frame_interval;
+ }
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
base::Unretained(this)),
- base::TimeDelta::FromSeconds(1) / capture_format_.frame_rate);
+ next_on_capture_timedelta);
}
} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.h b/chromium/media/video/capture/file_video_capture_device.h
index 7b6cd1d848d..bd3b2985b9f 100644
--- a/chromium/media/video/capture/file_video_capture_device.h
+++ b/chromium/media/video/capture/file_video_capture_device.h
@@ -43,7 +43,7 @@ class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
private:
// Returns size in bytes of an I420 frame, not including possible paddings,
// defined by |capture_format_|.
- int CalculateFrameSize();
+ int CalculateFrameSize() const;
// Called on the |capture_thread_|.
void OnAllocateAndStart(const VideoCaptureParams& params,
diff --git a/chromium/media/video/capture/file_video_capture_device_factory.cc b/chromium/media/video/capture/file_video_capture_device_factory.cc
index 8edcb4e4d81..d5dd112d9c4 100644
--- a/chromium/media/video/capture/file_video_capture_device_factory.cc
+++ b/chromium/media/video/capture/file_video_capture_device_factory.cc
@@ -18,7 +18,7 @@ const char kFileVideoCaptureDeviceName[] =
// Inspects the command line and retrieves the file path parameter.
base::FilePath GetFilePathFromCommandLine() {
base::FilePath command_line_file_path =
- CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ base::CommandLine::ForCurrentProcess()->GetSwitchValuePath(
switches::kUseFileForFakeVideoCapture);
CHECK(!command_line_file_path.empty());
return command_line_file_path;
@@ -40,7 +40,7 @@ void FileVideoCaptureDeviceFactory::GetDeviceNames(
VideoCaptureDevice::Names* const device_names) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(device_names->empty());
- base::FilePath command_line_file_path = GetFilePathFromCommandLine();
+ const base::FilePath command_line_file_path = GetFilePathFromCommandLine();
#if defined(OS_WIN)
device_names->push_back(VideoCaptureDevice::Name(
base::SysWideToUTF8(command_line_file_path.value()),
@@ -51,6 +51,11 @@ void FileVideoCaptureDeviceFactory::GetDeviceNames(
command_line_file_path.value(),
kFileVideoCaptureDeviceName,
VideoCaptureDevice::Name::AVFOUNDATION));
+#elif defined(OS_LINUX)
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(),
+ kFileVideoCaptureDeviceName,
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
#else
device_names->push_back(VideoCaptureDevice::Name(
command_line_file_path.value(),
@@ -64,6 +69,8 @@ void FileVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
DCHECK(thread_checker_.CalledOnValidThread());
base::File file =
FileVideoCaptureDevice::OpenFileForRead(GetFilePathFromCommandLine());
+ if (!file.IsValid())
+ return;
VideoCaptureFormat capture_format;
FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(&file,
&capture_format);
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate.cc b/chromium/media/video/capture/linux/v4l2_capture_delegate.cc
new file mode 100644
index 00000000000..b2aa498eed4
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate.cc
@@ -0,0 +1,420 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/linux/v4l2_capture_delegate.h"
+
+#include <poll.h>
+#include <sys/fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "base/bind.h"
+#include "base/files/file_enumerator.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/video/capture/linux/v4l2_capture_delegate_multi_plane.h"
+#include "media/video/capture/linux/v4l2_capture_delegate_single_plane.h"
+#include "media/video/capture/linux/video_capture_device_linux.h"
+
+namespace media {
+
+// Desired number of video buffers to allocate. The actual number of allocated
+// buffers by v4l2 driver can be higher or lower than this number.
+// kNumVideoBuffers should not be too small, or Chrome may not return enough
+// buffers back to driver in time.
+const uint32 kNumVideoBuffers = 4;
+// Timeout in milliseconds v4l2_thread_ blocks waiting for a frame from the hw.
+const int kCaptureTimeoutMs = 200;
+// The number of continuous timeouts tolerated before treated as error.
+const int kContinuousTimeoutLimit = 10;
+// MJPEG is preferred if the requested width or height is larger than this.
+const int kMjpegWidth = 640;
+const int kMjpegHeight = 480;
+// Typical framerate, in fps
+const int kTypicalFramerate = 30;
+
+// V4L2 color formats supported by V4L2CaptureDelegate derived classes.
+// This list is ordered by precedence of use -- but see caveats for MJPEG.
+static struct{
+ uint32_t fourcc;
+ VideoPixelFormat pixel_format;
+ size_t num_planes;
+} const kSupportedFormatsAndPlanarity[] = {
+ {V4L2_PIX_FMT_YUV420, PIXEL_FORMAT_I420, 1},
+ {V4L2_PIX_FMT_YUYV, PIXEL_FORMAT_YUY2, 1},
+ {V4L2_PIX_FMT_UYVY, PIXEL_FORMAT_UYVY, 1},
+ {V4L2_PIX_FMT_RGB24, PIXEL_FORMAT_RGB24, 1},
+#if !defined(OS_OPENBSD)
+ // TODO(mcasas): add V4L2_PIX_FMT_YVU420M when available in bots.
+ {V4L2_PIX_FMT_YUV420M, PIXEL_FORMAT_I420, 3},
+#endif
+ // MJPEG is usually sitting fairly low since we don't want to have to decode.
+  // However, it is needed for large resolutions due to USB bandwidth
+ // so GetListOfUsableFourCcs() can duplicate it on top, see that method.
+ {V4L2_PIX_FMT_MJPEG, PIXEL_FORMAT_MJPEG, 1},
+ // JPEG works as MJPEG on some gspca webcams from field reports, see
+ // https://code.google.com/p/webrtc/issues/detail?id=529, put it as the least
+ // preferred format.
+ {V4L2_PIX_FMT_JPEG, PIXEL_FORMAT_MJPEG, 1},
+};
+
+// static
+scoped_refptr<V4L2CaptureDelegate>
+V4L2CaptureDelegate::CreateV4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency) {
+ switch (device_name.capture_api_type()) {
+ case VideoCaptureDevice::Name::V4L2_SINGLE_PLANE:
+ return make_scoped_refptr(new V4L2CaptureDelegateSinglePlane(
+ device_name, v4l2_task_runner, power_line_frequency));
+ case VideoCaptureDevice::Name::V4L2_MULTI_PLANE:
+#if !defined(OS_OPENBSD)
+ return make_scoped_refptr(new V4L2CaptureDelegateMultiPlane(
+ device_name, v4l2_task_runner, power_line_frequency));
+ default:
+#endif
+ NOTIMPLEMENTED() << "Unknown V4L2 capture API type";
+ return scoped_refptr<V4L2CaptureDelegate>();
+ }
+}
+
+// static
+size_t V4L2CaptureDelegate::GetNumPlanesForFourCc(uint32_t fourcc) {
+ for (const auto& fourcc_and_pixel_format : kSupportedFormatsAndPlanarity) {
+ if (fourcc_and_pixel_format.fourcc == fourcc)
+ return fourcc_and_pixel_format.num_planes;
+ }
+ DVLOG(1) << "Unknown fourcc " << FourccToString(fourcc);
+ return 0;
+}
+
+// static
+VideoPixelFormat V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(
+ uint32_t v4l2_fourcc) {
+ for (const auto& fourcc_and_pixel_format : kSupportedFormatsAndPlanarity) {
+ if (fourcc_and_pixel_format.fourcc == v4l2_fourcc)
+ return fourcc_and_pixel_format.pixel_format;
+ }
+ // Not finding a pixel format is OK during device capabilities enumeration.
+ // Let the caller decide if PIXEL_FORMAT_UNKNOWN is an error or not.
+ DVLOG(1) << "Unsupported pixel format: " << FourccToString(v4l2_fourcc);
+ return PIXEL_FORMAT_UNKNOWN;
+}
+
+// static
+std::list<uint32_t> V4L2CaptureDelegate::GetListOfUsableFourCcs(
+ bool prefer_mjpeg) {
+ std::list<uint32_t> supported_formats;
+ for (const auto& format : kSupportedFormatsAndPlanarity)
+ supported_formats.push_back(format.fourcc);
+
+ // Duplicate MJPEG on top of the list depending on |prefer_mjpeg|.
+ if (prefer_mjpeg)
+ supported_formats.push_front(V4L2_PIX_FMT_MJPEG);
+
+ return supported_formats;
+}
+
+// static
+std::string V4L2CaptureDelegate::FourccToString(uint32_t fourcc) {
+ return base::StringPrintf("%c%c%c%c", fourcc & 0xFF, (fourcc >> 8) & 0xFF,
+ (fourcc >> 16) & 0xFF, (fourcc >> 24) & 0xFF);
+}
+
+V4L2CaptureDelegate::BufferTracker::BufferTracker() {
+}
+
+V4L2CaptureDelegate::BufferTracker::~BufferTracker() {
+ for (const auto& plane : planes_) {
+ if (plane.start == nullptr)
+ continue;
+ const int result = munmap(plane.start, plane.length);
+ PLOG_IF(ERROR, result < 0) << "Error munmap()ing V4L2 buffer";
+ }
+}
+
+void V4L2CaptureDelegate::BufferTracker::AddMmapedPlane(uint8_t* const start,
+ size_t length) {
+ Plane plane;
+ plane.start = start;
+ plane.length = length;
+ planes_.push_back(plane);
+}
+
+V4L2CaptureDelegate::V4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : capture_type_((device_name.capture_api_type() ==
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE)
+ ? V4L2_BUF_TYPE_VIDEO_CAPTURE
+ : V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE),
+ v4l2_task_runner_(v4l2_task_runner),
+ device_name_(device_name),
+ power_line_frequency_(power_line_frequency),
+ is_capturing_(false),
+ timeout_count_(0),
+ rotation_(0) {
+}
+
+V4L2CaptureDelegate::~V4L2CaptureDelegate() {
+}
+
+void V4L2CaptureDelegate::AllocateAndStart(
+ int width,
+ int height,
+ float frame_rate,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ DCHECK(client);
+ client_ = client.Pass();
+
+ // Need to open camera with O_RDWR after Linux kernel 3.3.
+ device_fd_.reset(HANDLE_EINTR(open(device_name_.id().c_str(), O_RDWR)));
+ if (!device_fd_.is_valid()) {
+ SetErrorState("Failed to open V4L2 device driver file.");
+ return;
+ }
+
+ v4l2_capability cap = {};
+ if (!((HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
+ ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE ||
+ cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)))) {
+ device_fd_.reset();
+ SetErrorState("This is not a V4L2 video capture device");
+ return;
+ }
+
+ // Get supported video formats in preferred order.
+ // For large resolutions, favour mjpeg over raw formats.
+ const std::list<uint32_t>& desired_v4l2_formats =
+ GetListOfUsableFourCcs(width > kMjpegWidth || height > kMjpegHeight);
+ std::list<uint32_t>::const_iterator best = desired_v4l2_formats.end();
+
+ v4l2_fmtdesc fmtdesc = {};
+ fmtdesc.type = capture_type_;
+ for (; HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_ENUM_FMT, &fmtdesc)) == 0;
+ ++fmtdesc.index) {
+ best = std::find(desired_v4l2_formats.begin(), best, fmtdesc.pixelformat);
+ }
+ if (best == desired_v4l2_formats.end()) {
+ SetErrorState("Failed to find a supported camera format.");
+ return;
+ }
+
+ DVLOG(1) << "Chosen pixel format is " << FourccToString(*best);
+
+ video_fmt_.type = capture_type_;
+ if (!FillV4L2Format(&video_fmt_, width, height, *best)) {
+ SetErrorState("Failed filling in V4L2 Format");
+ return;
+ }
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_FMT, &video_fmt_)) < 0) {
+ SetErrorState("Failed to set video capture format");
+ return;
+ }
+ const VideoPixelFormat pixel_format =
+ V4l2FourCcToChromiumPixelFormat(video_fmt_.fmt.pix.pixelformat);
+ if (pixel_format == PIXEL_FORMAT_UNKNOWN) {
+ SetErrorState("Unsupported pixel format");
+ return;
+ }
+
+ // Set capture framerate in the form of capture interval.
+ v4l2_streamparm streamparm = {};
+ streamparm.type = capture_type_;
+ // The following line checks that the driver knows about framerate get/set.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_G_PARM, &streamparm)) >= 0) {
+ // Now check if the device is able to accept a capture framerate set.
+ if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+ // |frame_rate| is float, approximate by a fraction.
+ streamparm.parm.capture.timeperframe.numerator =
+ media::kFrameRatePrecision;
+ streamparm.parm.capture.timeperframe.denominator =
+ (frame_rate) ? (frame_rate * media::kFrameRatePrecision)
+ : (kTypicalFramerate * media::kFrameRatePrecision);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_PARM, &streamparm)) <
+ 0) {
+ SetErrorState("Failed to set camera framerate");
+ return;
+ }
+ DVLOG(2) << "Actual camera driverframerate: "
+ << streamparm.parm.capture.timeperframe.denominator << "/"
+ << streamparm.parm.capture.timeperframe.numerator;
+ }
+ }
+ // TODO(mcasas): what should be done if the camera driver does not allow
+ // framerate configuration, or the actual one is different from the desired?
+
+ // Set anti-banding/anti-flicker to 50/60Hz. May fail due to not supported
+ // operation (|errno| == EINVAL in this case) or plain failure.
+ if ((power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) ||
+ (power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_60HZ) ||
+ (power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_AUTO)) {
+ struct v4l2_control control = {};
+ control.id = V4L2_CID_POWER_LINE_FREQUENCY;
+ control.value = power_line_frequency_;
+ const int retval =
+ HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_CTRL, &control));
+ if (retval != 0)
+ DVLOG(1) << "Error setting power line frequency removal";
+ }
+
+ capture_format_.frame_size.SetSize(video_fmt_.fmt.pix.width,
+ video_fmt_.fmt.pix.height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format = pixel_format;
+
+ v4l2_requestbuffers r_buffer = {};
+ r_buffer.type = capture_type_;
+ r_buffer.memory = V4L2_MEMORY_MMAP;
+ r_buffer.count = kNumVideoBuffers;
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
+ SetErrorState("Error requesting MMAP buffers from V4L2");
+ return;
+ }
+ for (unsigned int i = 0; i < r_buffer.count; ++i) {
+ if (!MapAndQueueBuffer(i)) {
+ SetErrorState("Allocate buffer failed");
+ return;
+ }
+ }
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &capture_type_))
+ < 0) {
+ SetErrorState("VIDIOC_STREAMON failed");
+ return;
+ }
+
+ is_capturing_ = true;
+ // Post task to start fetching frames from v4l2.
+ v4l2_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&V4L2CaptureDelegate::DoCapture, this));
+}
+
+void V4L2CaptureDelegate::StopAndDeAllocate() {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ // The order is important: stop streaming, clear |buffer_pool_|,
+ // thus munmap()ing the v4l2_buffers, and then return them to the OS.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &capture_type_))
+ < 0) {
+ SetErrorState("VIDIOC_STREAMOFF failed");
+ return;
+ }
+
+ buffer_tracker_pool_.clear();
+
+ v4l2_requestbuffers r_buffer = {};
+ r_buffer.type = capture_type_;
+ r_buffer.memory = V4L2_MEMORY_MMAP;
+ r_buffer.count = 0;
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0)
+ SetErrorState("Failed to VIDIOC_REQBUFS with count = 0");
+
+ // At this point we can close the device.
+ // This is also needed for correctly changing settings later via VIDIOC_S_FMT.
+ device_fd_.reset();
+ is_capturing_ = false;
+ client_.reset();
+}
+
+void V4L2CaptureDelegate::SetRotation(int rotation) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ DCHECK(rotation >= 0 && rotation < 360 && rotation % 90 == 0);
+ rotation_ = rotation;
+}
+
+bool V4L2CaptureDelegate::MapAndQueueBuffer(int index) {
+ v4l2_buffer buffer;
+ FillV4L2Buffer(&buffer, index);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYBUF, &buffer)) < 0) {
+ DLOG(ERROR) << "Error querying status of a MMAP V4L2 buffer";
+ return false;
+ }
+
+ const scoped_refptr<BufferTracker>& buffer_tracker = CreateBufferTracker();
+ if (!buffer_tracker->Init(device_fd_.get(), buffer)) {
+ DLOG(ERROR) << "Error creating BufferTracker";
+ return false;
+ }
+ buffer_tracker_pool_.push_back(buffer_tracker);
+
+ // Enqueue the buffer in the drivers incoming queue.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
+ DLOG(ERROR) << "Error enqueuing a V4L2 buffer back into the driver";
+ return false;
+ }
+ return true;
+}
+
+void V4L2CaptureDelegate::FillV4L2Buffer(v4l2_buffer* buffer,
+ int i) const {
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->memory = V4L2_MEMORY_MMAP;
+ buffer->index = i;
+ FinishFillingV4L2Buffer(buffer);
+}
+
+void V4L2CaptureDelegate::DoCapture() {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ if (!is_capturing_)
+ return;
+
+ pollfd device_pfd = {};
+ device_pfd.fd = device_fd_.get();
+ device_pfd.events = POLLIN;
+ const int result = HANDLE_EINTR(poll(&device_pfd, 1, kCaptureTimeoutMs));
+ if (result < 0) {
+ SetErrorState("Poll failed");
+ return;
+ }
+ // Check if poll() timed out; track the amount of times it did in a row and
+ // throw an error if it times out too many times.
+ if (result == 0) {
+ timeout_count_++;
+ if (timeout_count_ >= kContinuousTimeoutLimit) {
+ SetErrorState("Multiple continuous timeouts while read-polling.");
+ timeout_count_ = 0;
+ return;
+ }
+ } else {
+ timeout_count_ = 0;
+ }
+
+ // Deenqueue, send and reenqueue a buffer if the driver has filled one in.
+ if (device_pfd.revents & POLLIN) {
+ v4l2_buffer buffer;
+ FillV4L2Buffer(&buffer, 0);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_DQBUF, &buffer)) < 0) {
+ SetErrorState("Failed to dequeue capture buffer");
+ return;
+ }
+
+ SendBuffer(buffer_tracker_pool_[buffer.index], video_fmt_);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
+ SetErrorState("Failed to enqueue capture buffer");
+ return;
+ }
+ }
+
+ v4l2_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&V4L2CaptureDelegate::DoCapture, this));
+}
+
+void V4L2CaptureDelegate::SetErrorState(const std::string& reason) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ is_capturing_ = false;
+ client_->OnError(reason);
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate.h b/chromium/media/video/capture/linux/v4l2_capture_delegate.h
new file mode 100644
index 00000000000..bd0ccad6cf7
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate.h
@@ -0,0 +1,150 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
+
+#if defined(OS_OPENBSD)
+#include <sys/videoio.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include "base/files/scoped_file.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace media {
+
+// Class doing the actual Linux capture using V4L2 API. V4L2 SPLANE/MPLANE
+// capture specifics are implemented in derived classes. Created and destroyed
+// on the owner's thread, otherwise living and operating on |v4l2_task_runner_|.
+class V4L2CaptureDelegate
+ : public base::RefCountedThreadSafe<V4L2CaptureDelegate> {
+ public:
+  // Creates the appropriate VideoCaptureDelegate according to parameters.
+ static scoped_refptr<V4L2CaptureDelegate> CreateV4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+
+ // Retrieves the #planes for a given |fourcc|, or 0 if unknown.
+ static size_t GetNumPlanesForFourCc(uint32_t fourcc);
+ // Returns the Chrome pixel format for |v4l2_fourcc| or PIXEL_FORMAT_UNKNOWN.
+ static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(uint32_t v4l2_fourcc);
+
+ // Composes a list of usable and supported pixel formats, in order of
+ // preference, with MJPEG prioritised depending on |prefer_mjpeg|.
+ static std::list<uint32_t> GetListOfUsableFourCcs(bool prefer_mjpeg);
+
+ // Forward-to versions of VideoCaptureDevice virtual methods.
+ void AllocateAndStart(int width,
+ int height,
+ float frame_rate,
+ scoped_ptr<VideoCaptureDevice::Client> client);
+ void StopAndDeAllocate();
+
+ void SetRotation(int rotation);
+
+ protected:
+ // Class keeping track of SPLANE/MPLANE V4L2 buffers, mmap()ed on construction
+ // and munmap()ed on destruction. Destruction is syntactically equal for
+ // S/MPLANE but not construction, so this is implemented in derived classes.
+ // Internally it has a vector of planes, which for SPLANE will contain only
+ // one element.
+ class BufferTracker : public base::RefCounted<BufferTracker> {
+ public:
+ BufferTracker();
+ // Abstract method to mmap() given |fd| according to |buffer|, planarity
+ // specific.
+ virtual bool Init(int fd, const v4l2_buffer& buffer) = 0;
+
+ uint8_t* const GetPlaneStart(size_t plane) const {
+ DCHECK_LT(plane, planes_.size());
+ return planes_[plane].start;
+ }
+
+ protected:
+ friend class base::RefCounted<BufferTracker>;
+ virtual ~BufferTracker();
+ // Adds a given mmap()ed plane to |planes_|.
+ void AddMmapedPlane(uint8_t* const start, size_t length);
+
+ private:
+ struct Plane {
+ uint8_t* start;
+ size_t length;
+ };
+ std::vector<Plane> planes_;
+ };
+
+ V4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+ virtual ~V4L2CaptureDelegate();
+
+  // Creates the necessary, planarity-specific, internal tracking schemes.
+ virtual scoped_refptr<BufferTracker> CreateBufferTracker() const = 0;
+
+ // Fill in |format| with the given parameters, in a planarity dependent way.
+ virtual bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const = 0;
+
+ // Finish filling |buffer| struct with planarity-dependent data.
+ virtual void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const = 0;
+
+ // Sends the captured |buffer| to the |client_|, synchronously.
+ virtual void SendBuffer(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const = 0;
+
+ // A few accessors for SendBuffer()'s to access private member variables.
+ VideoCaptureFormat capture_format() const { return capture_format_; }
+ VideoCaptureDevice::Client* client() const { return client_.get(); }
+ int rotation() const { return rotation_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<V4L2CaptureDelegate>;
+
+ // Returns the input |fourcc| as a std::string four char representation.
+ static std::string FourccToString(uint32_t fourcc);
+ // VIDIOC_QUERYBUFs a buffer from V4L2, creates a BufferTracker for it and
+ // enqueues it (VIDIOC_QBUF) back into V4L2.
+ bool MapAndQueueBuffer(int index);
+ // Fills all common parts of |buffer|. Delegates to FinishFillingV4L2Buffer()
+ // for filling in the planar-dependent parts.
+ void FillV4L2Buffer(v4l2_buffer* buffer, int i) const;
+ void DoCapture();
+ void SetErrorState(const std::string& reason);
+
+ const v4l2_buf_type capture_type_;
+ const scoped_refptr<base::SingleThreadTaskRunner> v4l2_task_runner_;
+ const VideoCaptureDevice::Name device_name_;
+ const int power_line_frequency_;
+
+ // The following members are only known on AllocateAndStart().
+ VideoCaptureFormat capture_format_;
+ v4l2_format video_fmt_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ base::ScopedFD device_fd_;
+
+ // Vector of BufferTracker to keep track of mmap()ed pointers and their use.
+ std::vector<scoped_refptr<BufferTracker>> buffer_tracker_pool_;
+
+ bool is_capturing_;
+ int timeout_count_;
+
+ // Clockwise rotation in degrees. This value should be 0, 90, 180, or 270.
+ int rotation_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2CaptureDelegate);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc b/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc
new file mode 100644
index 00000000000..7551bed434c
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc
@@ -0,0 +1,99 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/linux/v4l2_capture_delegate_multi_plane.h"
+
+#include <sys/mman.h>
+
+namespace media {
+
+V4L2CaptureDelegateMultiPlane::V4L2CaptureDelegateMultiPlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : V4L2CaptureDelegate(device_name,
+ v4l2_task_runner,
+ power_line_frequency) {
+}
+
+V4L2CaptureDelegateMultiPlane::~V4L2CaptureDelegateMultiPlane() {
+}
+
+scoped_refptr<V4L2CaptureDelegate::BufferTracker>
+V4L2CaptureDelegateMultiPlane::CreateBufferTracker() const {
+ return make_scoped_refptr(new BufferTrackerMPlane());
+}
+
+bool V4L2CaptureDelegateMultiPlane::FillV4L2Format(
+ v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const {
+ format->fmt.pix_mp.width = width;
+ format->fmt.pix_mp.height = height;
+ format->fmt.pix_mp.pixelformat = pixelformat_fourcc;
+
+ const size_t num_v4l2_planes =
+ V4L2CaptureDelegate::GetNumPlanesForFourCc(pixelformat_fourcc);
+ if (num_v4l2_planes == 0u)
+ return false;
+ DCHECK_LE(num_v4l2_planes, static_cast<size_t>(VIDEO_MAX_PLANES));
+ format->fmt.pix_mp.num_planes = num_v4l2_planes;
+
+ v4l2_planes_.resize(num_v4l2_planes);
+ return true;
+}
+
+void V4L2CaptureDelegateMultiPlane::FinishFillingV4L2Buffer(
+ v4l2_buffer* buffer) const {
+ buffer->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buffer->length = v4l2_planes_.size();
+
+ static const struct v4l2_plane empty_plane = {};
+ std::fill(v4l2_planes_.begin(), v4l2_planes_.end(), empty_plane);
+ buffer->m.planes = v4l2_planes_.data();
+}
+
+void V4L2CaptureDelegateMultiPlane::SendBuffer(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const {
+ DCHECK_EQ(capture_format().pixel_format, PIXEL_FORMAT_I420);
+ const size_t y_stride = format.fmt.pix_mp.plane_fmt[0].bytesperline;
+ const size_t u_stride = format.fmt.pix_mp.plane_fmt[1].bytesperline;
+ const size_t v_stride = format.fmt.pix_mp.plane_fmt[2].bytesperline;
+ DCHECK_GE(y_stride, 1u * capture_format().frame_size.width());
+ DCHECK_GE(u_stride, 1u * capture_format().frame_size.width() / 2);
+ DCHECK_GE(v_stride, 1u * capture_format().frame_size.width() / 2);
+ client()->OnIncomingCapturedYuvData(buffer_tracker->GetPlaneStart(0),
+ buffer_tracker->GetPlaneStart(1),
+ buffer_tracker->GetPlaneStart(2),
+ y_stride,
+ u_stride,
+ v_stride,
+ capture_format(),
+ rotation(),
+ base::TimeTicks::Now());
+}
+
+bool V4L2CaptureDelegateMultiPlane::BufferTrackerMPlane::Init(
+ int fd,
+ const v4l2_buffer& buffer) {
+ for (size_t p = 0; p < buffer.length; ++p) {
+ // Some devices require mmap() to be called with both READ and WRITE.
+ // See http://crbug.com/178582.
+ void* const start =
+ mmap(NULL, buffer.m.planes[p].length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, buffer.m.planes[p].m.mem_offset);
+ if (start == MAP_FAILED) {
+ DLOG(ERROR) << "Error mmap()ing a V4L2 buffer into userspace";
+ return false;
+ }
+ AddMmapedPlane(static_cast<uint8_t*>(start), buffer.m.planes[p].length);
+ DVLOG(3) << "Mmap()ed plane #" << p << " of " << buffer.m.planes[p].length
+ << "B";
+ }
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h b/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h
new file mode 100644
index 00000000000..45aec2e8996
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/video/capture/linux/v4l2_capture_delegate.h"
+
+#if defined(OS_OPENBSD)
+#error "OpenBSD does not support MPlane capture API."
+#endif
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+// V4L2 specifics for MPLANE API.
+class V4L2CaptureDelegateMultiPlane final : public V4L2CaptureDelegate {
+ public:
+ V4L2CaptureDelegateMultiPlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+
+ private:
+ // BufferTracker derivation to implement construction semantics for MPLANE.
+ class BufferTrackerMPlane final : public BufferTracker {
+ public:
+ bool Init(int fd, const v4l2_buffer& buffer) override;
+
+ private:
+ ~BufferTrackerMPlane() override {}
+ };
+
+ ~V4L2CaptureDelegateMultiPlane() override;
+
+ // V4L2CaptureDelegate virtual methods implementation.
+ scoped_refptr<BufferTracker> CreateBufferTracker() const override;
+ bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const override;
+ void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const override;
+ void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const override;
+
+ // Vector to allocate and track as many v4l2_plane structs as planes, needed
+ // for v4l2_buffer.m.planes. This is a scratchpad marked mutable to enable
+ // using it in otherwise const methods.
+ mutable std::vector<struct v4l2_plane> v4l2_planes_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc b/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc
new file mode 100644
index 00000000000..0e6097428ad
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc
@@ -0,0 +1,60 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/linux/v4l2_capture_delegate_single_plane.h"
+
+#include <sys/mman.h>
+
+namespace media {
+
+scoped_refptr<V4L2CaptureDelegate::BufferTracker>
+V4L2CaptureDelegateSinglePlane::CreateBufferTracker() const {
+ return make_scoped_refptr(new BufferTrackerSPlane());
+}
+
+bool V4L2CaptureDelegateSinglePlane::FillV4L2Format(
+ v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const {
+ format->fmt.pix.width = width;
+ format->fmt.pix.height = height;
+ format->fmt.pix.pixelformat = pixelformat_fourcc;
+ return true;
+}
+
+void V4L2CaptureDelegateSinglePlane::FinishFillingV4L2Buffer(
+ v4l2_buffer* buffer) const {
+ buffer->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+void V4L2CaptureDelegateSinglePlane::SendBuffer(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const {
+ const size_t data_length = format.fmt.pix.sizeimage;
+ DCHECK_GE(data_length, capture_format().ImageAllocationSize());
+ client()->OnIncomingCapturedData(
+ buffer_tracker->GetPlaneStart(0),
+ data_length,
+ capture_format(),
+ rotation(),
+ base::TimeTicks::Now());
+}
+
+bool V4L2CaptureDelegateSinglePlane::BufferTrackerSPlane::Init(
+ int fd,
+ const v4l2_buffer& buffer) {
+ // Some devices require mmap() to be called with both READ and WRITE.
+ // See http://crbug.com/178582.
+ void* const start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, buffer.m.offset);
+ if (start == MAP_FAILED) {
+ DLOG(ERROR) << "Error mmap()ing a V4L2 buffer into userspace";
+ return false;
+ }
+ AddMmapedPlane(static_cast<uint8_t*>(start), buffer.length);
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h b/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h
new file mode 100644
index 00000000000..7ca4d343fab
--- /dev/null
+++ b/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/video/capture/linux/v4l2_capture_delegate.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+// V4L2 specifics for SPLANE API.
+class V4L2CaptureDelegateSinglePlane final : public V4L2CaptureDelegate {
+ public:
+ V4L2CaptureDelegateSinglePlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : V4L2CaptureDelegate(device_name,
+ v4l2_task_runner,
+ power_line_frequency) {}
+
+ private:
+ // BufferTracker derivation to implement construction semantics for SPLANE.
+ class BufferTrackerSPlane final : public BufferTracker {
+ public:
+ bool Init(int fd, const v4l2_buffer& buffer) override;
+
+ private:
+ ~BufferTrackerSPlane() override {}
+ };
+
+ ~V4L2CaptureDelegateSinglePlane() override {}
+
+ // V4L2CaptureDelegate virtual methods implementation.
+ scoped_refptr<BufferTracker> CreateBufferTracker() const override;
+ bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const override;
+ void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const override;
+ void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const override;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.cc b/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
index abbc3b2c544..5abfbffaa25 100644
--- a/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
@@ -6,7 +6,8 @@
#include "base/bind.h"
#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
#include "ui/gfx/display.h"
#include "ui/gfx/display_observer.h"
#include "ui/gfx/screen.h"
@@ -24,7 +25,7 @@ class VideoCaptureDeviceChromeOS::ScreenObserverDelegate
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
: capture_device_(capture_device),
ui_task_runner_(ui_task_runner),
- capture_task_runner_(base::MessageLoopProxy::current()) {
+ capture_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
ui_task_runner_->PostTask(
FROM_HERE,
base::Bind(&ScreenObserverDelegate::AddObserverOnUIThread, this));
@@ -41,14 +42,12 @@ class VideoCaptureDeviceChromeOS::ScreenObserverDelegate
private:
friend class base::RefCountedThreadSafe<ScreenObserverDelegate>;
- virtual ~ScreenObserverDelegate() {
- DCHECK(!capture_device_);
- }
+ ~ScreenObserverDelegate() override { DCHECK(!capture_device_); }
- virtual void OnDisplayAdded(const gfx::Display& /*new_display*/) override {}
- virtual void OnDisplayRemoved(const gfx::Display& /*old_display*/) override {}
- virtual void OnDisplayMetricsChanged(const gfx::Display& display,
- uint32_t metrics) override {
+ void OnDisplayAdded(const gfx::Display& /*new_display*/) override {}
+ void OnDisplayRemoved(const gfx::Display& /*old_display*/) override {}
+ void OnDisplayMetricsChanged(const gfx::Display& display,
+ uint32_t metrics) override {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
if (!(metrics & DISPLAY_METRIC_ROTATION))
return;
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.h b/chromium/media/video/capture/linux/video_capture_device_chromeos.h
index 6a79cd09485..50b77a9debb 100644
--- a/chromium/media/video/capture/linux/video_capture_device_chromeos.h
+++ b/chromium/media/video/capture/linux/video_capture_device_chromeos.h
@@ -21,7 +21,7 @@ class VideoCaptureDeviceChromeOS : public VideoCaptureDeviceLinux {
explicit VideoCaptureDeviceChromeOS(
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
const Name& device_name);
- virtual ~VideoCaptureDeviceChromeOS();
+ ~VideoCaptureDeviceChromeOS() override;
private:
class ScreenObserverDelegate;
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc b/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
index 36d69d87405..966e4f7b90b 100644
--- a/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
@@ -24,24 +24,104 @@
namespace media {
-static bool HasUsableFormats(int fd) {
- v4l2_fmtdesc fmtdesc;
- std::list<int> usable_fourccs;
+static bool HasUsableFormats(int fd, uint32 capabilities) {
+ const std::list<uint32_t>& usable_fourccs =
+ VideoCaptureDeviceLinux::GetListOfUsableFourCCs(false);
+
+ static const struct {
+ int capability;
+ v4l2_buf_type buf_type;
+ } kCapabilityAndBufferTypes[] = {
+ {V4L2_CAP_VIDEO_CAPTURE, V4L2_BUF_TYPE_VIDEO_CAPTURE},
+ {V4L2_CAP_VIDEO_CAPTURE_MPLANE, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE}
+ };
+
+ for (const auto& capability_and_buffer_type : kCapabilityAndBufferTypes) {
+ v4l2_fmtdesc fmtdesc = {};
+ if (capabilities & capability_and_buffer_type.capability) {
+ fmtdesc.type = capability_and_buffer_type.buf_type;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) == 0;
+ ++fmtdesc.index) {
+ if (std::find(usable_fourccs.begin(), usable_fourccs.end(),
+ fmtdesc.pixelformat) != usable_fourccs.end())
+ return true;
+ }
+ }
+ }
+ DLOG(ERROR) << "No usable formats found";
+ return false;
+}
- media::VideoCaptureDeviceLinux::GetListOfUsableFourCCs(false,
- &usable_fourccs);
+static std::list<float> GetFrameRateList(int fd,
+ uint32 fourcc,
+ uint32 width,
+ uint32 height) {
+ std::list<float> frame_rates;
+
+ v4l2_frmivalenum frame_interval = {};
+ frame_interval.pixel_format = fourcc;
+ frame_interval.width = width;
+ frame_interval.height = height;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS,
+ &frame_interval)) == 0; ++frame_interval.index) {
+ if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
+ if (frame_interval.discrete.numerator != 0) {
+ frame_rates.push_back(frame_interval.discrete.denominator /
+ static_cast<float>(frame_interval.discrete.numerator));
+ }
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS ||
+ frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ }
+ }
+ // Some devices, e.g. Kinect, do not enumerate any frame rates, see
+ // http://crbug.com/412284. Set their frame_rate to zero.
+ if (frame_rates.empty())
+ frame_rates.push_back(0);
+ return frame_rates;
+}
- memset(&fmtdesc, 0, sizeof(v4l2_fmtdesc));
- fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+static void GetSupportedFormatsForV4L2BufferType(
+ int fd,
+ v4l2_buf_type buf_type,
+ media::VideoCaptureFormats* supported_formats) {
+ v4l2_fmtdesc v4l2_format = {};
+ v4l2_format.type = buf_type;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &v4l2_format)) == 0;
+ ++v4l2_format.index) {
+ VideoCaptureFormat supported_format;
+ supported_format.pixel_format =
+ VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(
+ v4l2_format.pixelformat);
- while (HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) == 0) {
- if (std::find(usable_fourccs.begin(), usable_fourccs.end(),
- fmtdesc.pixelformat) != usable_fourccs.end())
- return true;
+ if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN)
+ continue;
- fmtdesc.index++;
+ v4l2_frmsizeenum frame_size = {};
+ frame_size.pixel_format = v4l2_format.pixelformat;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size)) == 0;
+ ++frame_size.index) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ supported_format.frame_size.SetSize(frame_size.discrete.width,
+ frame_size.discrete.height);
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE ||
+ frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ }
+
+ const std::list<float> frame_rates = GetFrameRateList(
+ fd, v4l2_format.pixelformat, frame_size.discrete.width,
+ frame_size.discrete.height);
+ for (const auto& frame_rate : frame_rates) {
+ supported_format.frame_rate = frame_rate;
+ supported_formats->push_back(supported_format);
+ DVLOG(1) << supported_format.ToString();
+ }
+ }
}
- return false;
}
VideoCaptureDeviceFactoryLinux::VideoCaptureDeviceFactoryLinux(
@@ -68,7 +148,7 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryLinux::Create(
// allocates the camera.
base::ScopedFD fd(HANDLE_EINTR(open(device_name.id().c_str(), O_RDONLY)));
if (!fd.is_valid()) {
- DVLOG(1) << "Cannot open device";
+ DLOG(ERROR) << "Cannot open device";
delete self;
return scoped_ptr<VideoCaptureDevice>();
}
@@ -80,32 +160,34 @@ void VideoCaptureDeviceFactoryLinux::GetDeviceNames(
VideoCaptureDevice::Names* const device_names) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(device_names->empty());
- base::FilePath path("/dev/");
+ const base::FilePath path("/dev/");
base::FileEnumerator enumerator(
path, false, base::FileEnumerator::FILES, "video*");
while (!enumerator.Next().empty()) {
- base::FileEnumerator::FileInfo info = enumerator.GetInfo();
-
- std::string unique_id = path.value() + info.GetName().value();
- base::ScopedFD fd(HANDLE_EINTR(open(unique_id.c_str(), O_RDONLY)));
+ const base::FileEnumerator::FileInfo info = enumerator.GetInfo();
+ const std::string unique_id = path.value() + info.GetName().value();
+ const base::ScopedFD fd(HANDLE_EINTR(open(unique_id.c_str(), O_RDONLY)));
if (!fd.is_valid()) {
- // Failed to open this device.
+ DLOG(ERROR) << "Couldn't open " << info.GetName().value();
continue;
}
- // Test if this is a V4L2 capture device.
+ // Test if this is a V4L2 capture device and if it has at least one
+ // supported capture format. Devices that have capture and output
+ // capabilities at the same time are memory-to-memory and are skipped, see
+ // http://crbug.com/139356.
v4l2_capability cap;
if ((HANDLE_EINTR(ioctl(fd.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
- (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
- !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT)) {
- // This is a V4L2 video capture device
- if (HasUsableFormats(fd.get())) {
- VideoCaptureDevice::Name device_name(base::StringPrintf("%s", cap.card),
- unique_id);
- device_names->push_back(device_name);
- } else {
- DVLOG(1) << "No usable formats reported by " << info.GetName().value();
- }
+ ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE ||
+ cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)) &&
+ HasUsableFormats(fd.get(), cap.capabilities)) {
+ device_names->push_back(VideoCaptureDevice::Name(
+ base::StringPrintf("%s", cap.card), unique_id,
+ (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
+ ? VideoCaptureDevice::Name::V4L2_MULTI_PLANE
+ : VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
}
}
}
@@ -117,82 +199,26 @@ void VideoCaptureDeviceFactoryLinux::GetDeviceSupportedFormats(
if (device.id().empty())
return;
base::ScopedFD fd(HANDLE_EINTR(open(device.id().c_str(), O_RDONLY)));
- if (!fd.is_valid()) {
- // Failed to open this device.
+ if (!fd.is_valid()) // Failed to open this device.
return;
- }
supported_formats->clear();
- // Retrieve the caps one by one, first get pixel format, then sizes, then
- // frame rates. See http://linuxtv.org/downloads/v4l-dvb-apis for reference.
- v4l2_fmtdesc pixel_format = {};
- pixel_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- while (HANDLE_EINTR(ioctl(fd.get(), VIDIOC_ENUM_FMT, &pixel_format)) == 0) {
- VideoCaptureFormat supported_format;
- supported_format.pixel_format =
- VideoCaptureDeviceLinux::V4l2ColorToVideoCaptureColorFormat(
- (int32)pixel_format.pixelformat);
- if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN) {
- ++pixel_format.index;
- continue;
- }
+ DCHECK_NE(device.capture_api_type(),
+ VideoCaptureDevice::Name::API_TYPE_UNKNOWN);
+ const v4l2_buf_type buf_type =
+ (device.capture_api_type() == VideoCaptureDevice::Name::V4L2_MULTI_PLANE)
+ ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
+ : V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ GetSupportedFormatsForV4L2BufferType(fd.get(), buf_type, supported_formats);
- v4l2_frmsizeenum frame_size = {};
- frame_size.pixel_format = pixel_format.pixelformat;
- while (HANDLE_EINTR(ioctl(fd.get(), VIDIOC_ENUM_FRAMESIZES, &frame_size)) ==
- 0) {
- if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
- supported_format.frame_size.SetSize(
- frame_size.discrete.width, frame_size.discrete.height);
- } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- } else if (frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- }
- v4l2_frmivalenum frame_interval = {};
- frame_interval.pixel_format = pixel_format.pixelformat;
- frame_interval.width = frame_size.discrete.width;
- frame_interval.height = frame_size.discrete.height;
- std::list<float> frame_rates;
- while (HANDLE_EINTR(ioctl(
- fd.get(), VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval)) == 0) {
- if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
- if (frame_interval.discrete.numerator != 0) {
- frame_rates.push_back(
- static_cast<float>(frame_interval.discrete.denominator) /
- static_cast<float>(frame_interval.discrete.numerator));
- }
- } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- break;
- } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- break;
- }
- ++frame_interval.index;
- }
-
- // Some devices, e.g. Kinect, do not enumerate any frame rates. For these
- // devices, we do not want to lose all enumeration (pixel format and
- // resolution), so we return a frame rate of zero instead.
- if (frame_rates.empty())
- frame_rates.push_back(0);
-
- for (std::list<float>::iterator it = frame_rates.begin();
- it != frame_rates.end(); ++it) {
- supported_format.frame_rate = *it;
- supported_formats->push_back(supported_format);
- DVLOG(1) << device.name() << " " << supported_format.ToString();
- }
- ++frame_size.index;
- }
- ++pixel_format.index;
- }
return;
}
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryLinux(ui_task_runner);
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.h b/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
index da570e3ca3a..8e52abbcc3d 100644
--- a/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
@@ -9,7 +9,7 @@
#include "media/video/capture/video_capture_device_factory.h"
-#include "media/video/capture/video_capture_types.h"
+#include "media/base/video_capture_types.h"
namespace media {
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/video/capture/linux/video_capture_device_linux.cc
index b0c147e9ff5..5bf6dc6a66d 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.cc
@@ -4,49 +4,20 @@
#include "media/video/capture/linux/video_capture_device_linux.h"
-#include <errno.h>
-#include <fcntl.h>
#if defined(OS_OPENBSD)
#include <sys/videoio.h>
#else
#include <linux/videodev2.h>
#endif
-#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <list>
-#include <string>
#include "base/bind.h"
-#include "base/files/file_enumerator.h"
-#include "base/files/scoped_file.h"
-#include "base/posix/eintr_wrapper.h"
#include "base/strings/stringprintf.h"
+#include "media/video/capture/linux/v4l2_capture_delegate.h"
namespace media {
-// Max number of video buffers VideoCaptureDeviceLinux can allocate.
-enum { kMaxVideoBuffers = 2 };
-// Timeout in microseconds v4l2_thread_ blocks waiting for a frame from the hw.
-enum { kCaptureTimeoutUs = 200000 };
-// The number of continuous timeouts tolerated before treated as error.
-enum { kContinuousTimeoutLimit = 10 };
-// Time to wait in milliseconds before v4l2_thread_ reschedules OnCaptureTask
-// if an event is triggered (select) but no video frame is read.
-enum { kCaptureSelectWaitMs = 10 };
-// MJPEG is preferred if the width or height is larger than this.
-enum { kMjpegWidth = 640 };
-enum { kMjpegHeight = 480 };
-// Typical framerate, in fps
-enum { kTypicalFramerate = 30 };
-
-// V4L2 color formats VideoCaptureDeviceLinux support.
-static const int32 kV4l2RawFmts[] = {
- V4L2_PIX_FMT_YUV420,
- V4L2_PIX_FMT_YUYV,
- V4L2_PIX_FMT_UYVY
-};
-
// USB VID and PID are both 4 bytes long.
static const size_t kVidPidSize = 4;
@@ -57,7 +28,7 @@ static const char kVidPathTemplate[] =
static const char kPidPathTemplate[] =
"/sys/class/video4linux/%s/device/../idProduct";
-bool ReadIdFile(const std::string path, std::string* id) {
+static bool ReadIdFile(const std::string path, std::string* id) {
char id_buf[kVidPidSize];
FILE* file = fopen(path.c_str(), "rb");
if (!file)
@@ -70,45 +41,18 @@ bool ReadIdFile(const std::string path, std::string* id) {
return true;
}
-// This function translates Video4Linux pixel formats to Chromium pixel formats,
-// should only support those listed in GetListOfUsableFourCCs.
+// Translates Video4Linux pixel formats to Chromium pixel formats.
// static
-VideoPixelFormat VideoCaptureDeviceLinux::V4l2ColorToVideoCaptureColorFormat(
- int32 v4l2_fourcc) {
- VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
- switch (v4l2_fourcc) {
- case V4L2_PIX_FMT_YUV420:
- result = PIXEL_FORMAT_I420;
- break;
- case V4L2_PIX_FMT_YUYV:
- result = PIXEL_FORMAT_YUY2;
- break;
- case V4L2_PIX_FMT_UYVY:
- result = PIXEL_FORMAT_UYVY;
- break;
- case V4L2_PIX_FMT_MJPEG:
- case V4L2_PIX_FMT_JPEG:
- result = PIXEL_FORMAT_MJPEG;
- break;
- default:
- DVLOG(1) << "Unsupported pixel format " << std::hex << v4l2_fourcc;
- }
- return result;
+VideoPixelFormat VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(
+ uint32 v4l2_fourcc) {
+ return V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(v4l2_fourcc);
}
+// Gets a list of usable Four CC formats prioritised.
// static
-void VideoCaptureDeviceLinux::GetListOfUsableFourCCs(bool favour_mjpeg,
- std::list<int>* fourccs) {
- for (size_t i = 0; i < arraysize(kV4l2RawFmts); ++i)
- fourccs->push_back(kV4l2RawFmts[i]);
- if (favour_mjpeg)
- fourccs->push_front(V4L2_PIX_FMT_MJPEG);
- else
- fourccs->push_back(V4L2_PIX_FMT_MJPEG);
-
- // JPEG works as MJPEG on some gspca webcams from field reports.
- // Put it as the least preferred format.
- fourccs->push_back(V4L2_PIX_FMT_JPEG);
+std::list<uint32_t> VideoCaptureDeviceLinux::GetListOfUsableFourCCs(
+ bool favour_mjpeg) {
+ return V4L2CaptureDelegate::GetListOfUsableFourCcs(favour_mjpeg);
}
const std::string VideoCaptureDevice::Name::GetModel() const {
@@ -134,18 +78,13 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
}
VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
- : is_capturing_(false),
- device_name_(device_name),
- v4l2_thread_("V4L2Thread"),
- buffer_pool_(NULL),
- buffer_pool_size_(0),
- timeout_count_(0),
- rotation_(0) {
+ : v4l2_thread_("V4L2CaptureThread"),
+ device_name_(device_name) {
}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
// Check if the thread is running.
- // This means that the device have not been DeAllocated properly.
+ // This means that the device has not been StopAndDeAllocate()d properly.
DCHECK(!v4l2_thread_.IsRunning());
v4l2_thread_.Stop();
}
@@ -153,359 +92,57 @@ VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
void VideoCaptureDeviceLinux::AllocateAndStart(
const VideoCaptureParams& params,
scoped_ptr<VideoCaptureDevice::Client> client) {
- if (v4l2_thread_.IsRunning()) {
+ DCHECK(!capture_impl_);
+ if (v4l2_thread_.IsRunning())
return; // Wrong state.
- }
v4l2_thread_.Start();
+
+ const int line_frequency =
+ TranslatePowerLineFrequencyToV4L2(GetPowerLineFrequencyForLocation());
+ capture_impl_ = V4L2CaptureDelegate::CreateV4L2CaptureDelegate(
+ device_name_, v4l2_thread_.message_loop_proxy(), line_frequency);
+ if (!capture_impl_) {
+ client->OnError("Failed to create VideoCaptureDelegate");
+ return;
+ }
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnAllocateAndStart,
- base::Unretained(this),
+ base::Bind(&V4L2CaptureDelegate::AllocateAndStart, capture_impl_,
params.requested_format.frame_size.width(),
params.requested_format.frame_size.height(),
- params.requested_format.frame_rate,
- base::Passed(&client)));
+ params.requested_format.frame_rate, base::Passed(&client)));
}
void VideoCaptureDeviceLinux::StopAndDeAllocate() {
- if (!v4l2_thread_.IsRunning()) {
+ if (!v4l2_thread_.IsRunning())
return; // Wrong state.
- }
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnStopAndDeAllocate,
- base::Unretained(this)));
+ base::Bind(&V4L2CaptureDelegate::StopAndDeAllocate, capture_impl_));
v4l2_thread_.Stop();
- // Make sure no buffers are still allocated.
- // This can happen (theoretically) if an error occurs when trying to stop
- // the camera.
- DeAllocateVideoBuffers();
+
+ capture_impl_ = NULL;
}
void VideoCaptureDeviceLinux::SetRotation(int rotation) {
if (v4l2_thread_.IsRunning()) {
v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::SetRotationOnV4L2Thread,
- base::Unretained(this), rotation));
- } else {
- // If the |v4l2_thread_| is not running, there's no race condition and
- // |rotation_| can be set directly.
- rotation_ = rotation;
+ FROM_HERE, base::Bind(&V4L2CaptureDelegate::SetRotation,
+ capture_impl_, rotation));
}
}
-void VideoCaptureDeviceLinux::SetRotationOnV4L2Thread(int rotation) {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- DCHECK(rotation >= 0 && rotation < 360 && rotation % 90 == 0);
- rotation_ = rotation;
-}
-
-void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
- int height,
- float frame_rate,
- scoped_ptr<Client> client) {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- client_ = client.Pass();
-
- // Need to open camera with O_RDWR after Linux kernel 3.3.
- device_fd_.reset(HANDLE_EINTR(open(device_name_.id().c_str(), O_RDWR)));
- if (!device_fd_.is_valid()) {
- SetErrorState("Failed to open V4L2 device driver.");
- return;
- }
-
- // Test if this is a V4L2 capture device.
- v4l2_capability cap;
- if (!((HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
- (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
- !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT))) {
- // This is not a V4L2 video capture device.
- device_fd_.reset();
- SetErrorState("This is not a V4L2 video capture device");
- return;
- }
-
- // Get supported video formats in preferred order.
- // For large resolutions, favour mjpeg over raw formats.
- std::list<int> v4l2_formats;
- GetListOfUsableFourCCs(width > kMjpegWidth || height > kMjpegHeight,
- &v4l2_formats);
-
- v4l2_fmtdesc fmtdesc = {0};
- fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- // Enumerate image formats.
- std::list<int>::iterator best = v4l2_formats.end();
- while (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_ENUM_FMT, &fmtdesc)) ==
- 0) {
- best = std::find(v4l2_formats.begin(), best, fmtdesc.pixelformat);
- fmtdesc.index++;
- }
-
- if (best == v4l2_formats.end()) {
- SetErrorState("Failed to find a supported camera format.");
- return;
- }
-
- // Set format and frame size now.
- v4l2_format video_fmt;
- memset(&video_fmt, 0, sizeof(v4l2_format));
- video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- video_fmt.fmt.pix.sizeimage = 0;
- video_fmt.fmt.pix.width = width;
- video_fmt.fmt.pix.height = height;
- video_fmt.fmt.pix.pixelformat = *best;
-
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_FMT, &video_fmt)) < 0) {
- SetErrorState(
- base::StringPrintf("Failed to set camera format: %s", strerror(errno)));
- return;
- }
-
- // Set capture framerate in the form of capture interval.
- v4l2_streamparm streamparm;
- memset(&streamparm, 0, sizeof(v4l2_streamparm));
- streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- // The following line checks that the driver knows about framerate get/set.
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_G_PARM, &streamparm)) >= 0) {
- // Now check if the device is able to accept a capture framerate set.
- if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
- // |frame_rate| is float, approximate by a fraction.
- streamparm.parm.capture.timeperframe.numerator =
- media::kFrameRatePrecision;
- streamparm.parm.capture.timeperframe.denominator = (frame_rate) ?
- (frame_rate * media::kFrameRatePrecision) :
- (kTypicalFramerate * media::kFrameRatePrecision);
-
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_PARM, &streamparm)) <
- 0) {
- SetErrorState("Failed to set camera framerate");
- return;
- }
- DVLOG(2) << "Actual camera driverframerate: "
- << streamparm.parm.capture.timeperframe.denominator << "/"
- << streamparm.parm.capture.timeperframe.numerator;
- }
- }
- // TODO(mcasas): what should be done if the camera driver does not allow
- // framerate configuration, or the actual one is different from the desired?
-
- // Set anti-banding/anti-flicker to 50/60Hz. May fail due to not supported
- // operation (|errno| == EINVAL in this case) or plain failure.
- const int power_line_frequency = GetPowerLineFrequencyForLocation();
- if ((power_line_frequency == kPowerLine50Hz) ||
- (power_line_frequency == kPowerLine60Hz)) {
- struct v4l2_control control = {};
- control.id = V4L2_CID_POWER_LINE_FREQUENCY;
- control.value = (power_line_frequency == kPowerLine50Hz) ?
- V4L2_CID_POWER_LINE_FREQUENCY_50HZ :
- V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
- HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_CTRL, &control));
- }
-
- // Store our current width and height.
- capture_format_.frame_size.SetSize(video_fmt.fmt.pix.width,
- video_fmt.fmt.pix.height);
- capture_format_.frame_rate = frame_rate;
- capture_format_.pixel_format =
- V4l2ColorToVideoCaptureColorFormat(video_fmt.fmt.pix.pixelformat);
-
- // Start capturing.
- if (!AllocateVideoBuffers()) {
- // Error, We can not recover.
- SetErrorState("Allocate buffer failed");
- return;
- }
-
- // Start UVC camera.
- v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &type)) == -1) {
- SetErrorState("VIDIOC_STREAMON failed");
- return;
- }
-
- is_capturing_ = true;
- // Post task to start fetching frames from v4l2.
- v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnCaptureTask,
- base::Unretained(this)));
-}
-
-void VideoCaptureDeviceLinux::OnStopAndDeAllocate() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &type)) < 0) {
- SetErrorState("VIDIOC_STREAMOFF failed");
- return;
- }
- // We don't dare to deallocate the buffers if we can't stop
- // the capture device.
- DeAllocateVideoBuffers();
-
- // We need to close and open the device if we want to change the settings
- // Otherwise VIDIOC_S_FMT will return error
- // Sad but true.
- device_fd_.reset();
- is_capturing_ = false;
- client_.reset();
-}
-
-void VideoCaptureDeviceLinux::OnCaptureTask() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- if (!is_capturing_)
- return;
-
- fd_set r_set;
- FD_ZERO(&r_set);
- FD_SET(device_fd_.get(), &r_set);
- timeval timeout;
-
- timeout.tv_sec = 0;
- timeout.tv_usec = kCaptureTimeoutUs;
-
- // First argument to select is the highest numbered file descriptor +1.
- // Refer to http://linux.die.net/man/2/select for more information.
- int result =
- HANDLE_EINTR(select(device_fd_.get() + 1, &r_set, NULL, NULL, &timeout));
- // Check if select have failed.
- if (result < 0) {
- // EINTR is a signal. This is not really an error.
- if (errno != EINTR) {
- SetErrorState("Select failed");
- return;
- }
- v4l2_thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnCaptureTask,
- base::Unretained(this)),
- base::TimeDelta::FromMilliseconds(kCaptureSelectWaitMs));
- }
-
- // Check if select timeout.
- if (result == 0) {
- timeout_count_++;
- if (timeout_count_ >= kContinuousTimeoutLimit) {
- SetErrorState(base::StringPrintf(
- "Continuous timeout %d times", timeout_count_));
- timeout_count_ = 0;
- return;
- }
- } else {
- timeout_count_ = 0;
- }
-
- // Check if the driver have filled a buffer.
- if (FD_ISSET(device_fd_.get(), &r_set)) {
- v4l2_buffer buffer;
- memset(&buffer, 0, sizeof(buffer));
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buffer.memory = V4L2_MEMORY_MMAP;
- // Dequeue a buffer.
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_DQBUF, &buffer)) == 0) {
- client_->OnIncomingCapturedData(
- static_cast<uint8*>(buffer_pool_[buffer.index].start),
- buffer.bytesused,
- capture_format_,
- rotation_,
- base::TimeTicks::Now());
-
- // Enqueue the buffer again.
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) == -1) {
- SetErrorState(base::StringPrintf(
- "Failed to enqueue capture buffer errno %d", errno));
- }
- } else {
- SetErrorState(base::StringPrintf(
- "Failed to dequeue capture buffer errno %d", errno));
- return;
- }
- }
-
- v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnCaptureTask,
- base::Unretained(this)));
-}
-
-bool VideoCaptureDeviceLinux::AllocateVideoBuffers() {
- v4l2_requestbuffers r_buffer;
- memset(&r_buffer, 0, sizeof(r_buffer));
-
- r_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- r_buffer.memory = V4L2_MEMORY_MMAP;
- r_buffer.count = kMaxVideoBuffers;
-
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
- return false;
- }
-
- if (r_buffer.count > kMaxVideoBuffers) {
- r_buffer.count = kMaxVideoBuffers;
- }
-
- buffer_pool_size_ = r_buffer.count;
-
- // Map the buffers.
- buffer_pool_ = new Buffer[r_buffer.count];
- for (unsigned int i = 0; i < r_buffer.count; i++) {
- v4l2_buffer buffer;
- memset(&buffer, 0, sizeof(buffer));
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buffer.memory = V4L2_MEMORY_MMAP;
- buffer.index = i;
-
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYBUF, &buffer)) < 0) {
- return false;
- }
-
- // Some devices require mmap() to be called with both READ and WRITE.
- // See crbug.com/178582.
- buffer_pool_[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
- MAP_SHARED, device_fd_.get(), buffer.m.offset);
- if (buffer_pool_[i].start == MAP_FAILED) {
- return false;
- }
- buffer_pool_[i].length = buffer.length;
- // Enqueue the buffer in the drivers incoming queue.
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
- return false;
- }
- }
- return true;
-}
-
-void VideoCaptureDeviceLinux::DeAllocateVideoBuffers() {
- if (!buffer_pool_)
- return;
-
- // Unmaps buffers.
- for (int i = 0; i < buffer_pool_size_; i++) {
- munmap(buffer_pool_[i].start, buffer_pool_[i].length);
- }
- v4l2_requestbuffers r_buffer;
- memset(&r_buffer, 0, sizeof(r_buffer));
- r_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- r_buffer.memory = V4L2_MEMORY_MMAP;
- r_buffer.count = 0;
-
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
- SetErrorState("Failed to reset buf.");
+// static
+int VideoCaptureDeviceLinux::TranslatePowerLineFrequencyToV4L2(int frequency) {
+ switch (frequency) {
+ case kPowerLine50Hz:
+ return V4L2_CID_POWER_LINE_FREQUENCY_50HZ;
+ case kPowerLine60Hz:
+ return V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
+ default:
+ // If we have no idea of the frequency, at least try and set it to AUTO.
+ return V4L2_CID_POWER_LINE_FREQUENCY_AUTO;
}
-
- delete [] buffer_pool_;
- buffer_pool_ = NULL;
- buffer_pool_size_ = 0;
-}
-
-void VideoCaptureDeviceLinux::SetErrorState(const std::string& reason) {
- DCHECK(!v4l2_thread_.IsRunning() ||
- v4l2_thread_.message_loop() == base::MessageLoop::current());
- is_capturing_ = false;
- client_->OnError(reason);
}
} // namespace media
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/video/capture/linux/video_capture_device_linux.h
index 79836a370fc..998d3a49563 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.h
@@ -15,16 +15,18 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
namespace media {
+class V4L2CaptureDelegate;
+
+// Linux V4L2 implementation of VideoCaptureDevice.
class VideoCaptureDeviceLinux : public VideoCaptureDevice {
public:
- static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(int32 v4l2_fourcc);
- static void GetListOfUsableFourCCs(bool favour_mjpeg,
- std::list<int>* fourccs);
+ static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(uint32 v4l2_fourcc);
+ static std::list<uint32_t> GetListOfUsableFourCCs(bool favour_mjpeg);
explicit VideoCaptureDeviceLinux(const Name& device_name);
~VideoCaptureDeviceLinux() override;
@@ -32,49 +34,22 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
// VideoCaptureDevice implementation.
void AllocateAndStart(const VideoCaptureParams& params,
scoped_ptr<Client> client) override;
-
void StopAndDeAllocate() override;
protected:
void SetRotation(int rotation);
- // Once |v4l2_thread_| is started, only called on that thread.
- void SetRotationOnV4L2Thread(int rotation);
-
private:
- // Buffers used to receive video frames from with v4l2.
- struct Buffer {
- Buffer() : start(0), length(0) {}
- void* start;
- size_t length;
- };
-
- // Called on the v4l2_thread_.
- void OnAllocateAndStart(int width,
- int height,
- float frame_rate,
- scoped_ptr<Client> client);
- void OnStopAndDeAllocate();
- void OnCaptureTask();
-
- bool AllocateVideoBuffers();
- void DeAllocateVideoBuffers();
- void SetErrorState(const std::string& reason);
-
- bool is_capturing_;
- scoped_ptr<VideoCaptureDevice::Client> client_;
- Name device_name_;
- base::ScopedFD device_fd_; // File descriptor for the opened camera device.
+ static int TranslatePowerLineFrequencyToV4L2(int frequency);
+
+ // Internal delegate doing the actual capture setting, buffer allocation and
+ // circulacion with the V4L2 API. Created and deleted in the thread where
+ // VideoCaptureDeviceLinux lives but otherwise operating on |v4l2_thread_|.
+ scoped_refptr<V4L2CaptureDelegate> capture_impl_;
+
base::Thread v4l2_thread_; // Thread used for reading data from the device.
- Buffer* buffer_pool_;
- int buffer_pool_size_; // Number of allocated buffers.
- int timeout_count_;
- VideoCaptureFormat capture_format_;
-
- // Clockwise rotation in degrees. This value should be 0, 90, 180, or 270.
- // This is only used on |v4l2_thread_| when it is running, or the constructor
- // thread otherwise.
- int rotation_;
+
+ const Name device_name_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceLinux);
};
diff --git a/chromium/media/video/capture/mac/OWNERS b/chromium/media/video/capture/mac/OWNERS
deleted file mode 100644
index ad809814d3d..00000000000
--- a/chromium/media/video/capture/mac/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-mcasas@chromium.org
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
index f7acf345281..e30c2f8bce9 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
@@ -11,9 +11,9 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#import "media/base/mac/avfoundation_glue.h"
+#include "media/base/video_capture_types.h"
#import "media/video/capture/mac/platform_video_capturing_mac.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
namespace media {
class VideoCaptureDeviceMac;
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
index 050162890a3..f3c7ca76bd2 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
@@ -9,7 +9,26 @@
#include "base/logging.h"
#include "base/mac/foundation_util.h"
#include "media/video/capture/mac/video_capture_device_mac.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
+
+// Prefer MJPEG if frame width or height is larger than this.
+static const int kMjpegWidthThreshold = 640;
+static const int kMjpegHeightThreshold = 480;
+
+// This function translates Mac Core Video pixel formats to Chromium pixel
+// formats. Chromium pixel formats are sorted in order of preference.
+media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
+ switch (code) {
+ case kCVPixelFormatType_422YpCbCr8:
+ return media::PIXEL_FORMAT_UYVY;
+ case CoreMediaGlue::kCMPixelFormat_422YpCbCr8_yuvs:
+ return media::PIXEL_FORMAT_YUY2;
+ case CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML:
+ return media::PIXEL_FORMAT_MJPEG;
+ default:
+ return media::PIXEL_FORMAT_UNKNOWN;
+ }
+}
@implementation VideoCaptureDeviceAVFoundation
@@ -55,20 +74,9 @@
for (CrAVCaptureDeviceFormat* format in device.formats) {
// MediaSubType is a CMPixelFormatType but can be used as CVPixelFormatType
// as well according to CMFormatDescription.h
- media::VideoPixelFormat pixelFormat = media::PIXEL_FORMAT_UNKNOWN;
- switch (CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
- [format formatDescription])) {
- case kCVPixelFormatType_422YpCbCr8: // Typical.
- pixelFormat = media::PIXEL_FORMAT_UYVY;
- break;
- case CoreMediaGlue::kCMPixelFormat_422YpCbCr8_yuvs:
- pixelFormat = media::PIXEL_FORMAT_YUY2;
- break;
- case CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML:
- pixelFormat = media::PIXEL_FORMAT_MJPEG;
- default:
- break;
- }
+ const media::VideoPixelFormat pixelFormat = FourCCToChromiumPixelFormat(
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ [format formatDescription]));
CoreMediaGlue::CMVideoDimensions dimensions =
CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(
@@ -183,6 +191,25 @@
frameHeight_ = height;
frameRate_ = frameRate;
+ FourCharCode best_fourcc = kCVPixelFormatType_422YpCbCr8;
+ const bool prefer_mjpeg =
+ width > kMjpegWidthThreshold || height > kMjpegHeightThreshold;
+ for (CrAVCaptureDeviceFormat* format in captureDevice_.formats) {
+ const FourCharCode fourcc =
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ [format formatDescription]);
+ if (prefer_mjpeg &&
+ fourcc == CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML) {
+ best_fourcc = fourcc;
+ break;
+ }
+ // Compare according to Chromium preference.
+ if (FourCCToChromiumPixelFormat(fourcc) <
+ FourCCToChromiumPixelFormat(best_fourcc)) {
+ best_fourcc = fourcc;
+ }
+ }
+
// The capture output has to be configured, despite Mac documentation
// detailing that setting the sessionPreset would be enough. The reason for
// this mismatch is probably because most of the AVFoundation docs are written
@@ -192,7 +219,7 @@
NSDictionary* videoSettingsDictionary = @{
(id)kCVPixelBufferWidthKey : @(width),
(id)kCVPixelBufferHeightKey : @(height),
- (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_422YpCbCr8),
+ (id)kCVPixelBufferPixelFormatTypeKey : @(best_fourcc),
AVFoundationGlue::AVVideoScalingModeKey() :
AVFoundationGlue::AVVideoScalingModeResizeAspectFill()
};
@@ -252,30 +279,57 @@
// AVFoundation calls from a number of threads, depending on, at least, if
// Chrome is on foreground or background. Sample the actual thread here.
callback_thread_checker_.DetachFromThread();
- callback_thread_checker_.CalledOnValidThread();
- CVImageBufferRef videoFrame =
- CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
- // Lock the frame and calculate frame size.
- const int kLockFlags = 0;
- if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags) ==
- kCVReturnSuccess) {
- void* baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
- size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
- size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
- size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
- size_t frameSize = bytesPerRow * frameHeight;
- UInt8* addressToPass = reinterpret_cast<UInt8*>(baseAddress);
-
- media::VideoCaptureFormat captureFormat(
- gfx::Size(frameWidth, frameHeight),
- frameRate_,
- media::PIXEL_FORMAT_UYVY);
+ CHECK(callback_thread_checker_.CalledOnValidThread());
+
+ const CoreMediaGlue::CMFormatDescriptionRef formatDescription =
+ CoreMediaGlue::CMSampleBufferGetFormatDescription(sampleBuffer);
+ const FourCharCode fourcc =
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(formatDescription);
+ const CoreMediaGlue::CMVideoDimensions dimensions =
+ CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(formatDescription);
+ const media::VideoCaptureFormat captureFormat(
+ gfx::Size(dimensions.width, dimensions.height),
+ frameRate_,
+ FourCCToChromiumPixelFormat(fourcc));
+
+ char* baseAddress = 0;
+ size_t frameSize = 0;
+ CVImageBufferRef videoFrame = nil;
+ if (fourcc == CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML) {
+ // If MJPEG, use block buffer instead of pixel buffer.
+ CoreMediaGlue::CMBlockBufferRef blockBuffer =
+ CoreMediaGlue::CMSampleBufferGetDataBuffer(sampleBuffer);
+ if (blockBuffer) {
+ size_t lengthAtOffset;
+ CoreMediaGlue::CMBlockBufferGetDataPointer(
+ blockBuffer, 0, &lengthAtOffset, &frameSize, &baseAddress);
+ // Expect the MJPEG data to be available as a contiguous reference, i.e.
+ // not covered by multiple memory blocks.
+ CHECK_EQ(lengthAtOffset, frameSize);
+ }
+ } else {
+ videoFrame = CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
+ // Lock the frame and calculate frame size.
+ if (CVPixelBufferLockBaseAddress(videoFrame, kCVPixelBufferLock_ReadOnly) ==
+ kCVReturnSuccess) {
+ baseAddress = static_cast<char*>(CVPixelBufferGetBaseAddress(videoFrame));
+ frameSize = CVPixelBufferGetHeight(videoFrame) *
+ CVPixelBufferGetBytesPerRow(videoFrame);
+ } else {
+ videoFrame = nil;
+ }
+ }
+
+ {
base::AutoLock lock(lock_);
- if (!frameReceiver_)
- return;
- frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat, 0, 0);
- CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
+ if (frameReceiver_ && baseAddress) {
+ frameReceiver_->ReceiveFrame(reinterpret_cast<uint8_t*>(baseAddress),
+ frameSize, captureFormat, 0, 0);
+ }
}
+
+ if (videoFrame)
+ CVPixelBufferUnlockBaseAddress(videoFrame, kCVPixelBufferLock_ReadOnly);
}
- (void)onVideoError:(NSNotification*)errorNotification {
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm b/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
index b6b1356b4bf..79863463ca1 100644
--- a/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/location.h"
+#include "base/profiler/scoped_tracker.h"
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#import "media/base/mac/avfoundation_glue.h"
@@ -44,6 +45,12 @@ static bool IsDeviceBlacklisted(const VideoCaptureDevice::Name& name) {
static scoped_ptr<media::VideoCaptureDevice::Names>
EnumerateDevicesUsingQTKit() {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 media::EnumerateDevicesUsingQTKit"));
+
scoped_ptr<VideoCaptureDevice::Names> device_names(
new VideoCaptureDevice::Names());
NSMutableDictionary* capture_devices =
@@ -64,6 +71,11 @@ static void RunDevicesEnumeratedCallback(
const base::Callback<void(scoped_ptr<media::VideoCaptureDevice::Names>)>&
callback,
scoped_ptr<media::VideoCaptureDevice::Names> device_names) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 media::RunDevicesEnumeratedCallback"));
callback.Run(device_names.Pass());
}
@@ -86,23 +98,6 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryMac::Create(
DCHECK_NE(device_name.capture_api_type(),
VideoCaptureDevice::Name::API_TYPE_UNKNOWN);
- // Check device presence only for AVFoundation API, since it is too expensive
- // and brittle for QTKit. The actual initialization at device level will fail
- // subsequently if the device is not present.
- if (AVFoundationGlue::IsAVFoundationSupported()) {
- scoped_ptr<VideoCaptureDevice::Names> device_names(
- new VideoCaptureDevice::Names());
- GetDeviceNames(device_names.get());
-
- VideoCaptureDevice::Names::iterator it = device_names->begin();
- for (; it != device_names->end(); ++it) {
- if (it->id() == device_name.id())
- break;
- }
- if (it == device_names->end())
- return scoped_ptr<VideoCaptureDevice>();
- }
-
scoped_ptr<VideoCaptureDevice> capture_device;
if (device_name.capture_api_type() == VideoCaptureDevice::Name::DECKLINK) {
capture_device.reset(new VideoCaptureDeviceDeckLinkMac(device_name));
@@ -119,6 +114,11 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryMac::Create(
void VideoCaptureDeviceFactoryMac::GetDeviceNames(
VideoCaptureDevice::Names* device_names) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 VideoCaptureDeviceFactoryMac::GetDeviceNames"));
DCHECK(thread_checker_.CalledOnValidThread());
// Loop through all available devices and add to |device_names|.
NSDictionary* capture_devices;
@@ -204,4 +204,11 @@ void VideoCaptureDeviceFactoryMac::GetDeviceSupportedFormats(
}
}
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryMac(ui_task_runner);
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm b/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
index e015a2f2b56..45027bac822 100644
--- a/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "base/command_line.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/message_loop/message_loop.h"
+#include "base/thread_task_runner_handle.h"
#import "media/base/mac/avfoundation_glue.h"
#include "media/base/media_switches.h"
#include "media/video/capture/mac/video_capture_device_factory_mac.h"
@@ -13,10 +14,14 @@
namespace media {
class VideoCaptureDeviceFactoryMacTest : public testing::Test {
- virtual void SetUp() {
- CommandLine::ForCurrentProcess()->AppendSwitch(
+ void SetUp() override {
+ AVFoundationGlue::InitializeAVFoundation();
+ base::CommandLine::ForCurrentProcess()->AppendSwitch(
switches::kEnableAVFoundation);
}
+
+ private:
+ base::MessageLoop message_loop_;
};
TEST_F(VideoCaptureDeviceFactoryMacTest, ListDevicesAVFoundation) {
@@ -25,7 +30,7 @@ TEST_F(VideoCaptureDeviceFactoryMacTest, ListDevicesAVFoundation) {
return;
}
VideoCaptureDeviceFactoryMac video_capture_device_factory(
- base::MessageLoopProxy::current());
+ base::ThreadTaskRunnerHandle::Get());
VideoCaptureDevice::Names names;
video_capture_device_factory.GetDeviceNames(&names);
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/video/capture/mac/video_capture_device_mac.h
index a3fd1b1061d..bbe8a230558 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.h
@@ -18,8 +18,8 @@
#include "base/mac/scoped_nsobject.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
@protocol PlatformVideoCapturingMac;
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/video/capture/mac/video_capture_device_mac.mm
index 65103e5e86f..521a11178dd 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.mm
@@ -11,16 +11,17 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/mac/scoped_ioobject.h"
#include "base/mac/scoped_ioplugininterface.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
+#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#import "media/base/mac/avfoundation_glue.h"
#import "media/video/capture/mac/platform_video_capturing_mac.h"
#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
@implementation DeviceNameAndTransportType
@@ -118,7 +119,7 @@ static bool FindDeviceInterfaceInUsbDevice(
const int product_id,
const io_service_t usb_device,
IOUSBDeviceInterface*** device_interface) {
- // Create a plug-in, i.e. a user-side controller to manipulate USB device.
+ // Create a plugin, i.e. a user-side controller to manipulate USB device.
IOCFPlugInInterface** plugin;
SInt32 score; // Unused, but required for IOCreatePlugInInterfaceForService.
kern_return_t kr =
@@ -133,7 +134,7 @@ static bool FindDeviceInterfaceInUsbDevice(
}
base::mac::ScopedIOPluginInterface<IOCFPlugInInterface> plugin_ref(plugin);
- // Fetch the Device Interface from the plug-in.
+ // Fetch the Device Interface from the plugin.
HRESULT res =
(*plugin)->QueryInterface(plugin,
CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
@@ -179,7 +180,7 @@ static bool FindVideoControlInterfaceInDeviceInterface(
}
base::mac::ScopedIOObject<io_service_t> found_interface_ref(found_interface);
- // Create a user side controller (i.e. a "plug-in") for the found interface.
+ // Create a user side controller (i.e. a "plugin") for the found interface.
SInt32 score;
kr = IOCreatePlugInInterfaceForService(found_interface,
kIOUSBInterfaceUserClientTypeID,
@@ -198,8 +199,8 @@ static bool FindVideoControlInterfaceInDeviceInterface(
static void SetAntiFlickerInVideoControlInterface(
IOCFPlugInInterface** plugin_interface,
const int frequency) {
- // Create, the control interface for the found plug-in, and release
- // the intermediate plug-in.
+ // Create, the control interface for the found plugin, and release
+ // the intermediate plugin.
IOUSBInterfaceInterface** control_interface = NULL;
HRESULT res = (*plugin_interface)->QueryInterface(
plugin_interface,
@@ -345,7 +346,7 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
: device_name_(device_name),
tried_to_square_pixels_(false),
- task_runner_(base::MessageLoopProxy::current()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
state_(kNotInitialized),
capture_device_(nil),
weak_factory_(this) {
@@ -393,7 +394,9 @@ void VideoCaptureDeviceMac::AllocateAndStart(
capture_format_.frame_rate =
std::max(kMinFrameRate,
std::min(params.requested_format.frame_rate, kMaxFrameRate));
- capture_format_.pixel_format = PIXEL_FORMAT_UYVY;
+ // Leave the pixel format selection to AVFoundation/QTKit. The pixel format
+ // will be passed to |ReceiveFrame|.
+ capture_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
// QTKit: Set the capture resolution only if this is VGA or smaller, otherwise
// leave it unconfigured and start capturing: QTKit will produce frames at the
@@ -539,7 +542,7 @@ void VideoCaptureDeviceMac::ReceiveFrame(
client_->OnIncomingCapturedData(video_frame,
video_frame_length,
- capture_format_,
+ frame_format,
0,
base::TimeTicks::Now());
}
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index 09622e40a26..a8492d595c2 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -9,10 +9,10 @@
#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/mac/scoped_nsexception_enabler.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/mac/video_capture_device_mac.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
@implementation VideoCaptureDeviceQTKit
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/video/capture/video_capture_device.cc
index dc62fc9ccd7..44442c49a50 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/video/capture/video_capture_device.cc
@@ -24,7 +24,14 @@ VideoCaptureDevice::Name::Name() {}
VideoCaptureDevice::Name::Name(const std::string& name, const std::string& id)
: device_name_(name), unique_id_(id) {}
-#if defined(OS_WIN)
+#if defined(OS_LINUX)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type) {}
+#elif defined(OS_WIN)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
@@ -32,9 +39,7 @@ VideoCaptureDevice::Name::Name(const std::string& name,
unique_id_(id),
capture_api_class_(api_type),
capabilities_id_(id) {}
-#endif
-
-#if defined(OS_MACOSX)
+#elif defined(OS_MACOSX)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
@@ -53,19 +58,36 @@ VideoCaptureDevice::Name::Name(const std::string& name,
capture_api_class_(api_type),
transport_type_(transport_type),
is_blacklisted_(false) {}
+#elif defined(ANDROID)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type) {}
#endif
VideoCaptureDevice::Name::~Name() {}
-#if defined(OS_WIN)
+#if defined(OS_LINUX)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch (capture_api_type()) {
+ case V4L2_SINGLE_PLANE:
+ return "V4L2 SPLANE";
+ case V4L2_MULTI_PLANE:
+ return "V4L2 MPLANE";
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
+#elif defined(OS_WIN)
const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
switch(capture_api_type()) {
case MEDIA_FOUNDATION:
return "Media Foundation";
case DIRECT_SHOW:
return "Direct Show";
- case DIRECT_SHOW_WDM_CROSSBAR:
- return "Direct Show WDM Crossbar";
default:
NOTREACHED() << "Unknown Video Capture API type!";
return "Unknown API";
@@ -85,8 +107,29 @@ const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
return "Unknown API";
}
}
+#elif defined(OS_ANDROID)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch(capture_api_type()) {
+ case API1:
+ return "Camera API1";
+ case API2_LEGACY:
+ return "Camera API2 Legacy";
+ case API2_FULL:
+ return "Camera API2 Full";
+ case API2_LIMITED:
+ return "Camera API2 Limited";
+ case TANGO:
+ return "Tango API";
+ case API_TYPE_UNKNOWN:
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
#endif
+VideoCaptureDevice::Client::Buffer::~Buffer() {}
+
VideoCaptureDevice::~VideoCaptureDevice() {}
int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
@@ -109,10 +152,4 @@ int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
return kPowerLine60Hz;
}
-bool VideoCaptureDevice::InitializeImageCapture(
- const ImageCaptureFormat& image_format,
- scoped_ptr<ImageClient> client) {
- return false;
-}
-
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/video/capture/video_capture_device.h
index 455dc83d3e9..41a1d771d11 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/video/capture/video_capture_device.h
@@ -21,8 +21,9 @@
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
+#include "media/base/video_capture_types.h"
#include "media/base/video_frame.h"
-#include "media/video/capture/video_capture_types.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace media {
@@ -41,16 +42,21 @@ class MEDIA_EXPORT VideoCaptureDevice {
Name();
Name(const std::string& name, const std::string& id);
-#if defined(OS_WIN)
+#if defined(OS_LINUX)
+ // Linux/CrOS targets Capture Api type: it can only be set on construction.
+ enum CaptureApiType {
+ V4L2_SINGLE_PLANE,
+ V4L2_MULTI_PLANE,
+ API_TYPE_UNKNOWN
+ };
+#elif defined(OS_WIN)
// Windows targets Capture Api type: it can only be set on construction.
enum CaptureApiType {
MEDIA_FOUNDATION,
DIRECT_SHOW,
- DIRECT_SHOW_WDM_CROSSBAR,
API_TYPE_UNKNOWN
};
-#endif
-#if defined(OS_MACOSX)
+#elif defined(OS_MACOSX)
// Mac targets Capture Api type: it can only be set on construction.
enum CaptureApiType {
AVFOUNDATION,
@@ -63,10 +69,25 @@ class MEDIA_EXPORT VideoCaptureDevice {
USB_OR_BUILT_IN,
OTHER_TRANSPORT
};
+#elif defined (OS_ANDROID)
+ // Android targets Capture Api type: it can only be set on construction.
+ // Automatically generated enum to interface with Java world.
+ //
+ // A Java counterpart will be generated for this enum.
+ // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
+ enum CaptureApiType {
+ API1,
+ API2_LEGACY,
+ API2_FULL,
+ API2_LIMITED,
+ TANGO,
+ API_TYPE_UNKNOWN
+ };
#endif
-#if defined(OS_WIN) || defined(OS_MACOSX)
- Name(const std::string& name,
- const std::string& id,
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+ Name(const std::string& name, const std::string& id,
const CaptureApiType api_type);
#endif
#if defined(OS_MACOSX)
@@ -102,7 +123,8 @@ class MEDIA_EXPORT VideoCaptureDevice {
return unique_id_ < other.id();
}
-#if defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
CaptureApiType capture_api_type() const {
return capture_api_class_.capture_api_type();
}
@@ -117,7 +139,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
void set_capabilities_id(const std::string& id) {
capabilities_id_ = id;
}
-#endif
+#endif // if defined(OS_WIN)
#if defined(OS_MACOSX)
TransportType transport_type() const {
return transport_type_;
@@ -128,12 +150,13 @@ class MEDIA_EXPORT VideoCaptureDevice {
void set_is_blacklisted(bool is_blacklisted) {
is_blacklisted_ = is_blacklisted;
}
-#endif // if defined(OS_WIN)
+#endif // if defined(OS_MACOSX)
private:
std::string device_name_;
std::string unique_id_;
-#if defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
// This class wraps the CaptureApiType to give it a by default value if not
// initialized.
class CaptureApiClass {
@@ -166,43 +189,25 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Manages a list of Name entries.
typedef std::list<Name> Names;
- class MEDIA_EXPORT Client {
+ // Interface defining the methods that clients of VideoCapture must have. It
+ // is actually two-in-one: clients may implement OnIncomingCapturedData() or
+ // ReserveOutputBuffer() + OnIncomingCapturedVideoFrame(), or all of them.
+ // All clients must implement OnError().
+ class MEDIA_EXPORT Client {
public:
// Memory buffer returned by Client::ReserveOutputBuffer().
- class Buffer : public base::RefCountedThreadSafe<Buffer> {
+ class MEDIA_EXPORT Buffer {
public:
- int id() const { return id_; }
- void* data() const { return data_; }
- size_t size() const { return size_; }
-
- protected:
- friend class base::RefCountedThreadSafe<Buffer>;
-
- Buffer(int id, void* data, size_t size)
- : id_(id), data_(data), size_(size) {}
- virtual ~Buffer() {}
-
- const int id_;
- void* const data_;
- const size_t size_;
+ virtual ~Buffer() = 0;
+ virtual int id() const = 0;
+ virtual size_t size() const = 0;
+ virtual void* data() = 0;
+ virtual gfx::GpuMemoryBufferType GetType() = 0;
+ virtual ClientBuffer AsClientBuffer() = 0;
};
virtual ~Client() {}
- // Reserve an output buffer into which contents can be captured directly.
- // The returned Buffer will always be allocated with a memory size suitable
- // for holding a packed video frame with pixels of |format| format, of
- // |dimensions| frame dimensions. It is permissible for |dimensions| to be
- // zero; in which case the returned Buffer does not guarantee memory
- // backing, but functions as a reservation for external input for the
- // purposes of buffer throttling.
- //
- // The output buffer stays reserved for use until the Buffer object is
- // destroyed.
- virtual scoped_refptr<Buffer> ReserveOutputBuffer(
- media::VideoFrame::Format format,
- const gfx::Size& dimensions) = 0;
-
// Captured a new video frame, data for which is pointed to by |data|.
//
// The format of the frame is described by |frame_format|, and is assumed to
@@ -212,19 +217,50 @@ class MEDIA_EXPORT VideoCaptureDevice {
virtual void OnIncomingCapturedData(const uint8* data,
int length,
const VideoCaptureFormat& frame_format,
- int rotation, // Clockwise.
- base::TimeTicks timestamp) = 0;
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) = 0;
+
+ // Captured a 3 planar YUV frame. Planes are possibly disjoint.
+ // |frame_format| must indicate I420.
+ virtual void OnIncomingCapturedYuvData(
+ const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) = 0;
- // Captured a new video frame, held in |frame|.
+ // Reserve an output buffer into which contents can be captured directly.
+ // The returned Buffer will always be allocated with a memory size suitable
+ // for holding a packed video frame with pixels of |format| format, of
+ // |dimensions| frame dimensions. It is permissible for |dimensions| to be
+ // zero; in which case the returned Buffer does not guarantee memory
+ // backing, but functions as a reservation for external input for the
+ // purposes of buffer throttling.
//
- // As the frame is backed by a reservation returned by
+ // The output buffer stays reserved and mapped for use until the Buffer
+ // object is destroyed or returned.
+ virtual scoped_ptr<Buffer> ReserveOutputBuffer(
+ media::VideoPixelFormat format,
+ const gfx::Size& dimensions) = 0;
+
+ // Captured new video data, held in |frame| or |buffer|, respectively for
+ // OnIncomingCapturedVideoFrame() and OnIncomingCapturedBuffer().
+ //
+ // In both cases, as the frame is backed by a reservation returned by
// ReserveOutputBuffer(), delivery is guaranteed and will require no
// additional copies in the browser process.
+ virtual void OnIncomingCapturedBuffer(
+ scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) = 0;
virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<Buffer>& buffer,
- const VideoCaptureFormat& buffer_format,
- const scoped_refptr<media::VideoFrame>& frame,
- base::TimeTicks timestamp) = 0;
+ scoped_ptr<Buffer> buffer,
+ const scoped_refptr<VideoFrame>& frame,
+ const base::TimeTicks& timestamp) = 0;
// An error has occurred that cannot be handled and VideoCaptureDevice must
// be StopAndDeAllocate()-ed. |reason| is a text description of the error.
@@ -232,41 +268,6 @@ class MEDIA_EXPORT VideoCaptureDevice {
// VideoCaptureDevice requests the |message| to be logged.
virtual void OnLog(const std::string& message) {}
-
- // The video stream has been muted. After this callback, no more
- // OnIncomingCapturedData() will be called. This may happen when
- // CaptureImage() has called. After the still image captured, the client
- // will get notified by OnUnmute() and the video stream will be resumed.
- virtual void OnMute() {}
-
- // The video stream has resumed.
- virtual void OnUnmute() {}
- };
-
- // Interface for clients that use VideoCaptureDevice for taking still images.
- class MEDIA_EXPORT ImageClient {
- public:
- virtual ~ImageClient() {}
-
- // Callback function to notify the client a captured image is available.
- //
- // The captured still image is stored at address |data| and is of |length|
- // bytes. The format of the frame is described by |format|, and is assumed
- // to be tightly packed. The still image should be rotated |rotation|
- // degrees clockwise for viewing.
- //
- // Note that the content in |data| will not be valid after this callback
- // returns. Copy the content to use it later.
- virtual void OnIncomingCapturedData(const uint8* data,
- size_t length,
- const ImageCaptureFormat& format,
- int rotation,
- base::TimeTicks timestamp) = 0;
-
- // Callback function to notify the client about a failure of the image
- // capture. The VideoCaptureDevice must be StopAndDeAllocate()-ed.
- // |reason| contains a text description of the error.
- virtual void OnError(const std::string& reason) = 0;
};
virtual ~VideoCaptureDevice();
@@ -294,32 +295,6 @@ class MEDIA_EXPORT VideoCaptureDevice {
// defined, otherwise returns 0.
int GetPowerLineFrequencyForLocation() const;
- // Initializes the device for still image capture for the given image format.
- // This call is synchronous and returns true iff the initialization is
- // successful.
- //
- // This function must be called between AllocateAndStart() and
- // StopAndDeAllocate().
- virtual bool InitializeImageCapture(const ImageCaptureFormat& image_format,
- scoped_ptr<ImageClient> client);
-
- // Releases resources for image capture.
- //
- // The ImageClient passed from InitializeImageCapture will be freed. This
- // method must be called between InitializeImageCapture() and
- // StopAndDeAllocate().
- virtual void ReleaseImageCapture() {}
-
- // Requests one image from the device.
- //
- // The image will be returned via the ImageClient::OnIncomingCapturedData()
- // callback. If the video stream has to be stopped to capture the still image,
- // the Client::OnMute() and Client::OnUnmute() will be called.
- //
- // This function must be called between InitializeImageCapture() and
- // ReleaseImageCapture().
- virtual void CaptureImage() {}
-
protected:
static const int kPowerLine50Hz = 50;
static const int kPowerLine60Hz = 60;
diff --git a/chromium/media/video/capture/video_capture_device_factory.cc b/chromium/media/video/capture/video_capture_device_factory.cc
index 00a8f7ec3d4..c87c39a11b5 100644
--- a/chromium/media/video/capture/video_capture_device_factory.cc
+++ b/chromium/media/video/capture/video_capture_device_factory.cc
@@ -9,22 +9,13 @@
#include "media/video/capture/fake_video_capture_device_factory.h"
#include "media/video/capture/file_video_capture_device_factory.h"
-#if defined(OS_MACOSX)
-#include "media/video/capture/mac/video_capture_device_factory_mac.h"
-#elif defined(OS_LINUX)
-#include "media/video/capture/linux/video_capture_device_factory_linux.h"
-#elif defined(OS_ANDROID)
-#include "media/video/capture/android/video_capture_device_factory_android.h"
-#elif defined(OS_WIN)
-#include "media/video/capture/win/video_capture_device_factory_win.h"
-#endif
-
namespace media {
// static
scoped_ptr<VideoCaptureDeviceFactory> VideoCaptureDeviceFactory::CreateFactory(
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
- const CommandLine* command_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* command_line =
+ base::CommandLine::ForCurrentProcess();
// Use a Fake or File Video Device Factory if the command line flags are
// present, otherwise use the normal, platform-dependent, device factory.
if (command_line->HasSwitch(switches::kUseFakeDeviceForMediaStream)) {
@@ -38,22 +29,8 @@ scoped_ptr<VideoCaptureDeviceFactory> VideoCaptureDeviceFactory::CreateFactory(
} else {
// |ui_task_runner| is needed for the Linux ChromeOS factory to retrieve
// screen rotations and for the Mac factory to run QTKit device enumeration.
-#if defined(OS_MACOSX)
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- VideoCaptureDeviceFactoryMac(ui_task_runner));
-#elif defined(OS_LINUX)
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- VideoCaptureDeviceFactoryLinux(ui_task_runner));
-#elif defined(OS_ANDROID)
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- VideoCaptureDeviceFactoryAndroid());
-#elif defined(OS_WIN)
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- VideoCaptureDeviceFactoryWin());
-#else
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- VideoCaptureDeviceFactory());
-#endif
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ CreateVideoCaptureDeviceFactory(ui_task_runner));
}
}
@@ -73,4 +50,14 @@ void VideoCaptureDeviceFactory::EnumerateDeviceNames(const base::Callback<
callback.Run(device_names.Pass());
}
+#if !defined(OS_MACOSX) && !defined(OS_LINUX) && !defined(OS_ANDROID) && !defined(OS_WIN)
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ NOTIMPLEMENTED();
+ return NULL;
+}
+#endif
+
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device_factory.h b/chromium/media/video/capture/video_capture_device_factory.h
index f8c2196298d..224cdba8949 100644
--- a/chromium/media/video/capture/video_capture_device_factory.h
+++ b/chromium/media/video/capture/video_capture_device_factory.h
@@ -38,13 +38,6 @@ class MEDIA_EXPORT VideoCaptureDeviceFactory {
const VideoCaptureDevice::Name& device,
VideoCaptureFormats* supported_formats) = 0;
- // Gets the supported formats for still image of a particular device attached
- // to the system. In case format enumeration is not supported, or there was
- // a problem, the formats array will be empty.
- virtual void GetDeviceSupportedImageFormats(
- const VideoCaptureDevice::Name& device,
- ImageCaptureFormats* supported_formats) {}
-
protected:
// Gets the names of all video capture devices connected to this computer.
// Used by the default implementation of EnumerateDeviceNames().
@@ -53,6 +46,9 @@ class MEDIA_EXPORT VideoCaptureDeviceFactory {
base::ThreadChecker thread_checker_;
private:
+ static VideoCaptureDeviceFactory* CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactory);
};
diff --git a/chromium/media/video/capture/video_capture_device_info.h b/chromium/media/video/capture/video_capture_device_info.h
index e8c2a9b986d..d215cf5755b 100644
--- a/chromium/media/video/capture/video_capture_device_info.h
+++ b/chromium/media/video/capture/video_capture_device_info.h
@@ -5,8 +5,8 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
namespace media {
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/video/capture/video_capture_device_unittest.cc
index 3ceab4690e8..3ae4ef5c07d 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/video_capture_device_unittest.cc
@@ -6,13 +6,14 @@
#include "base/bind_helpers.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/test/test_timeouts.h"
+#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_device_factory.h"
-#include "media/video/capture/video_capture_types.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,6 +23,7 @@
#endif
#if defined(OS_MACOSX)
+#include "media/base/mac/avfoundation_glue.h"
#include "media/video/capture/mac/video_capture_device_factory_mac.h"
#endif
@@ -37,16 +39,12 @@
#define MAYBE_CaptureMjpeg DISABLED_CaptureMjpeg
#elif defined(OS_WIN)
#define MAYBE_AllocateBadSize AllocateBadSize
-// Windows currently uses DirectShow to convert from MJPEG and a raw format is
-// always delivered.
-#define MAYBE_CaptureMjpeg DISABLED_CaptureMjpeg
+#define MAYBE_CaptureMjpeg CaptureMjpeg
#elif defined(OS_ANDROID)
// TODO(wjia): enable those tests on Android.
// On Android, native camera (JAVA) delivers frames on UI thread which is the
// main thread for tests. This results in no frame received by
// VideoCaptureAndroid.
-#define CaptureVGA DISABLED_CaptureVGA
-#define Capture720p DISABLED_Capture720p
#define MAYBE_AllocateBadSize DISABLED_AllocateBadSize
#define ReAllocateCamera DISABLED_ReAllocateCamera
#define DeAllocateCameraWhileRunning DISABLED_DeAllocateCameraWhileRunning
@@ -61,35 +59,59 @@ using ::testing::_;
using ::testing::SaveArg;
namespace media {
+namespace {
+
+static const gfx::Size kCaptureSizes[] = {
+ gfx::Size(640, 480),
+ gfx::Size(1280, 720)
+};
-class MockClient : public media::VideoCaptureDevice::Client {
+class MockClient : public VideoCaptureDevice::Client {
public:
- MOCK_METHOD2(ReserveOutputBuffer,
- scoped_refptr<Buffer>(media::VideoFrame::Format format,
- const gfx::Size& dimensions));
- MOCK_METHOD0(OnErr, void());
+ MOCK_METHOD9(OnIncomingCapturedYuvData,
+ void(const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp));
+ MOCK_METHOD0(DoReserveOutputBuffer, void(void));
+ MOCK_METHOD0(DoOnIncomingCapturedBuffer, void(void));
+ MOCK_METHOD0(DoOnIncomingCapturedVideoFrame, void(void));
+ MOCK_METHOD1(OnError, void(const std::string& reason));
explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
- : main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
-
- virtual void OnError(const std::string& error_message) override {
- OnErr();
- }
-
- virtual void OnIncomingCapturedData(const uint8* data,
- int length,
- const VideoCaptureFormat& format,
- int rotation,
- base::TimeTicks timestamp) override {
+ : main_thread_(base::ThreadTaskRunnerHandle::Get()),
+ frame_cb_(frame_cb) {}
+
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ const base::TimeTicks& timestamp) override {
+ ASSERT_GT(length, 0);
+ ASSERT_TRUE(data != NULL);
main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
}
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<Buffer>& buffer,
- const media::VideoCaptureFormat& buffer_format,
- const scoped_refptr<media::VideoFrame>& frame,
- base::TimeTicks timestamp) override {
- NOTREACHED();
+ // Trampoline methods to workaround GMOCK problems with scoped_ptr<>.
+ scoped_ptr<Buffer> ReserveOutputBuffer(VideoPixelFormat format,
+ const gfx::Size& dimensions) override {
+ DoReserveOutputBuffer();
+ return scoped_ptr<Buffer>();
+ }
+ void OnIncomingCapturedBuffer(scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) override {
+ DoOnIncomingCapturedBuffer();
+ }
+ void OnIncomingCapturedVideoFrame(scoped_ptr<Buffer> buffer,
+ const scoped_refptr<VideoFrame>& frame,
+ const base::TimeTicks& timestamp) override {
+ DoOnIncomingCapturedVideoFrame();
}
private:
@@ -98,13 +120,13 @@ class MockClient : public media::VideoCaptureDevice::Client {
};
class DeviceEnumerationListener :
- public base::RefCounted<DeviceEnumerationListener>{
+ public base::RefCounted<DeviceEnumerationListener> {
public:
MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
- void(media::VideoCaptureDevice::Names* names));
+ void(VideoCaptureDevice::Names* names));
// GMock doesn't support move-only arguments, so we use this forward method.
void OnEnumeratedDevicesCallback(
- scoped_ptr<media::VideoCaptureDevice::Names> names) {
+ scoped_ptr<VideoCaptureDevice::Names> names) {
OnEnumeratedDevicesCallbackPtr(names.release());
}
private:
@@ -112,9 +134,12 @@ class DeviceEnumerationListener :
virtual ~DeviceEnumerationListener() {}
};
-class VideoCaptureDeviceTest : public testing::Test {
+} // namespace
+
+class VideoCaptureDeviceTest :
+ public testing::TestWithParam<gfx::Size> {
protected:
- typedef media::VideoCaptureDevice::Client Client;
+ typedef VideoCaptureDevice::Client Client;
VideoCaptureDeviceTest()
: loop_(new base::MessageLoop()),
@@ -122,15 +147,23 @@ class VideoCaptureDeviceTest : public testing::Test {
new MockClient(base::Bind(&VideoCaptureDeviceTest::OnFrameCaptured,
base::Unretained(this)))),
video_capture_device_factory_(VideoCaptureDeviceFactory::CreateFactory(
- base::MessageLoopProxy::current())) {
+ base::ThreadTaskRunnerHandle::Get())) {
device_enumeration_listener_ = new DeviceEnumerationListener();
}
void SetUp() override {
#if defined(OS_ANDROID)
- media::VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
+ VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
base::android::AttachCurrentThread());
#endif
+#if defined(OS_MACOSX)
+ AVFoundationGlue::InitializeAVFoundation();
+#endif
+ EXPECT_CALL(*client_, OnIncomingCapturedYuvData(_,_,_,_,_,_,_,_,_))
+ .Times(0);
+ EXPECT_CALL(*client_, DoReserveOutputBuffer()).Times(0);
+ EXPECT_CALL(*client_, DoOnIncomingCapturedBuffer()).Times(0);
+ EXPECT_CALL(*client_, DoOnIncomingCapturedVideoFrame()).Times(0);
}
void ResetWithNewClient() {
@@ -148,8 +181,8 @@ class VideoCaptureDeviceTest : public testing::Test {
run_loop_->Run();
}
- scoped_ptr<media::VideoCaptureDevice::Names> EnumerateDevices() {
- media::VideoCaptureDevice::Names* names;
+ scoped_ptr<VideoCaptureDevice::Names> EnumerateDevices() {
+ VideoCaptureDevice::Names* names;
EXPECT_CALL(*device_enumeration_listener_.get(),
OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
@@ -157,7 +190,7 @@ class VideoCaptureDeviceTest : public testing::Test {
base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
device_enumeration_listener_));
base::MessageLoop::current()->RunUntilIdle();
- return scoped_ptr<media::VideoCaptureDevice::Names>(names);
+ return scoped_ptr<VideoCaptureDevice::Names>(names);
}
const VideoCaptureFormat& last_format() const { return last_format_; }
@@ -165,30 +198,44 @@ class VideoCaptureDeviceTest : public testing::Test {
scoped_ptr<VideoCaptureDevice::Name> GetFirstDeviceNameSupportingPixelFormat(
const VideoPixelFormat& pixel_format) {
names_ = EnumerateDevices();
- if (!names_->size()) {
+ if (names_->empty()) {
DVLOG(1) << "No camera available.";
return scoped_ptr<VideoCaptureDevice::Name>();
}
- VideoCaptureDevice::Names::iterator names_iterator;
- for (names_iterator = names_->begin(); names_iterator != names_->end();
- ++names_iterator) {
+ for (const auto& names_iterator : *names_) {
VideoCaptureFormats supported_formats;
video_capture_device_factory_->GetDeviceSupportedFormats(
- *names_iterator,
+ names_iterator,
&supported_formats);
- VideoCaptureFormats::iterator formats_iterator;
- for (formats_iterator = supported_formats.begin();
- formats_iterator != supported_formats.end(); ++formats_iterator) {
- if (formats_iterator->pixel_format == pixel_format) {
+ for (const auto& formats_iterator : supported_formats) {
+ if (formats_iterator.pixel_format == pixel_format) {
return scoped_ptr<VideoCaptureDevice::Name>(
- new VideoCaptureDevice::Name(*names_iterator));
+ new VideoCaptureDevice::Name(names_iterator));
}
}
}
- DVLOG(1) << "No camera can capture the format: " << pixel_format;
+ DVLOG_IF(1, pixel_format != PIXEL_FORMAT_MAX) << "No camera can capture the"
+ << " format: " << VideoCaptureFormat::PixelFormatToString(pixel_format);
return scoped_ptr<VideoCaptureDevice::Name>();
}
+ bool IsCaptureSizeSupported(const VideoCaptureDevice::Name& device,
+ const gfx::Size& size) {
+ VideoCaptureFormats supported_formats;
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ device, &supported_formats);
+ const auto it =
+ std::find_if(supported_formats.begin(), supported_formats.end(),
+ [&size](VideoCaptureFormat const& f) {
+ return f.frame_size == size;
+ });
+ if (it == supported_formats.end()) {
+ DVLOG(1) << "Size " << size.ToString() << " is not supported.";
+ return false;
+ }
+ return true;
+ }
+
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
@@ -233,7 +280,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_OpenInvalidDevice) {
} else {
// The presence of the actual device is only checked on AllocateAndStart()
// and not on creation for QTKit API in Mac OS X platform.
- EXPECT_CALL(*client_, OnErr()).Times(1);
+ EXPECT_CALL(*client_, OnError(_)).Times(1);
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(640, 480);
@@ -245,60 +292,50 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_OpenInvalidDevice) {
#endif
}
-TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
+TEST_P(VideoCaptureDeviceTest, CaptureWithSize) {
names_ = EnumerateDevices();
- if (!names_->size()) {
+ if (names_->empty()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
- scoped_ptr<VideoCaptureDevice> device(
- video_capture_device_factory_->Create(names_->front()));
- ASSERT_TRUE(device);
- DVLOG(1) << names_->front().id();
-
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
-
- VideoCaptureParams capture_params;
- capture_params.requested_format.frame_size.SetSize(640, 480);
- capture_params.requested_format.frame_rate = 30;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- device->AllocateAndStart(capture_params, client_.Pass());
- // Get captured video frames.
- WaitForCapturedFrame();
- EXPECT_EQ(last_format().frame_size.width(), 640);
- EXPECT_EQ(last_format().frame_size.height(), 480);
- device->StopAndDeAllocate();
-}
-
-TEST_F(VideoCaptureDeviceTest, Capture720p) {
- names_ = EnumerateDevices();
- if (!names_->size()) {
- DVLOG(1) << "No camera available. Exiting test.";
+ const gfx::Size& size = GetParam();
+ if (!IsCaptureSizeSupported(names_->front(), size))
return;
- }
+ const int width = size.width();
+ const int height = size.height();
scoped_ptr<VideoCaptureDevice> device(
video_capture_device_factory_->Create(names_->front()));
ASSERT_TRUE(device);
+ DVLOG(1) << names_->front().id();
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
VideoCaptureParams capture_params;
- capture_params.requested_format.frame_size.SetSize(1280, 720);
- capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.frame_size.SetSize(width, height);
+ capture_params.requested_format.frame_rate = 30.0f;
capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
device->AllocateAndStart(capture_params, client_.Pass());
// Get captured video frames.
WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), width);
+ EXPECT_EQ(last_format().frame_size.height(), height);
+ if (last_format().pixel_format != PIXEL_FORMAT_MJPEG)
+ EXPECT_LE(static_cast<size_t>(width * height * 3 / 2),
+ last_format().ImageAllocationSize());
device->StopAndDeAllocate();
}
+#if !defined(OS_ANDROID)
+INSTANTIATE_TEST_CASE_P(MAYBE_VideoCaptureDeviceTests,
+ VideoCaptureDeviceTest,
+ testing::ValuesIn(kCaptureSizes));
+#endif
+
TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
names_ = EnumerateDevices();
- if (!names_->size()) {
+ if (names_->empty()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
@@ -306,8 +343,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
video_capture_device_factory_->Create(names_->front()));
ASSERT_TRUE(device);
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(637, 472);
@@ -318,6 +354,8 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
device->StopAndDeAllocate();
EXPECT_EQ(last_format().frame_size.width(), 640);
EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(static_cast<size_t>(640 * 480 * 3 / 2),
+ last_format().ImageAllocationSize());
}
// Cause hangs on Windows Debug. http://crbug.com/417824
@@ -329,7 +367,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
TEST_F(VideoCaptureDeviceTest, MAYBE_ReAllocateCamera) {
names_ = EnumerateDevices();
- if (!names_->size()) {
+ if (names_->empty()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
@@ -373,7 +411,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_ReAllocateCamera) {
TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
names_ = EnumerateDevices();
- if (!names_->size()) {
+ if (names_->empty()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
@@ -381,8 +419,7 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
video_capture_device_factory_->Create(names_->front()));
ASSERT_TRUE(device);
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(640, 480);
@@ -409,8 +446,7 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
video_capture_device_factory_->Create(*name));
ASSERT_TRUE(device);
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(1280, 720);
@@ -422,6 +458,8 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
// Verify we get MJPEG from the device. Not all devices can capture 1280x720
// @ 30 fps, so we don't care about the exact resolution we get.
EXPECT_EQ(last_format().pixel_format, PIXEL_FORMAT_MJPEG);
+ EXPECT_GE(static_cast<size_t>(1280 * 720),
+ last_format().ImageAllocationSize());
device->StopAndDeAllocate();
}
diff --git a/chromium/media/video/capture/win/capability_list_win.cc b/chromium/media/video/capture/win/capability_list_win.cc
index cefda435379..2de2786e752 100644
--- a/chromium/media/video/capture/win/capability_list_win.cc
+++ b/chromium/media/video/capture/win/capability_list_win.cc
@@ -39,15 +39,16 @@ static bool CompareCapability(const VideoCaptureFormat& requested,
return lhs.pixel_format < rhs.pixel_format;
}
-CapabilityWin GetBestMatchedCapability(const VideoCaptureFormat& requested,
- const CapabilityList& capabilities) {
+const CapabilityWin& GetBestMatchedCapability(
+ const VideoCaptureFormat& requested,
+ const CapabilityList& capabilities) {
DCHECK(!capabilities.empty());
- CapabilityWin best_match = capabilities.front();
+ const CapabilityWin* best_match = &(*capabilities.begin());
for (const CapabilityWin& capability : capabilities) {
- if (CompareCapability(requested, capability, best_match))
- best_match = capability;
+ if (CompareCapability(requested, capability, *best_match))
+ best_match = &capability;
}
- return best_match;
+ return *best_match;
}
} // namespace media
diff --git a/chromium/media/video/capture/win/capability_list_win.h b/chromium/media/video/capture/win/capability_list_win.h
index e173439cd4d..dc74947629b 100644
--- a/chromium/media/video/capture/win/capability_list_win.h
+++ b/chromium/media/video/capture/win/capability_list_win.h
@@ -10,22 +10,36 @@
#define MEDIA_VIDEO_CAPTURE_WIN_CAPABILITY_LIST_WIN_H_
#include <list>
+#include <windows.h>
-#include "media/video/capture/video_capture_types.h"
+#include "media/base/video_capture_types.h"
namespace media {
struct CapabilityWin {
CapabilityWin(int index, const VideoCaptureFormat& format)
- : stream_index(index), supported_format(format) {}
- int stream_index;
- VideoCaptureFormat supported_format;
+ : stream_index(index), supported_format(format), info_header() {}
+
+ // Used by VideoCaptureDeviceWin.
+ CapabilityWin(int index, const VideoCaptureFormat& format,
+ const BITMAPINFOHEADER& info_header)
+ : stream_index(index),
+ supported_format(format),
+ info_header(info_header) {
+ }
+
+ const int stream_index;
+ const VideoCaptureFormat supported_format;
+
+ // |info_header| is only valid if DirectShow is used.
+ const BITMAPINFOHEADER info_header;
};
typedef std::list<CapabilityWin> CapabilityList;
-CapabilityWin GetBestMatchedCapability(const VideoCaptureFormat& requested,
- const CapabilityList& capabilities);
+const CapabilityWin& GetBestMatchedCapability(
+ const VideoCaptureFormat& requested,
+ const CapabilityList& capabilities);
} // namespace media
diff --git a/chromium/media/video/capture/win/filter_base_win.cc b/chromium/media/video/capture/win/filter_base_win.cc
index 50cbcff8320..d371f8be0bb 100644
--- a/chromium/media/video/capture/win/filter_base_win.cc
+++ b/chromium/media/video/capture/win/filter_base_win.cc
@@ -18,11 +18,8 @@ class PinEnumerator final
index_(0) {
}
- ~PinEnumerator() {
- }
-
// IUnknown implementation.
- STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) {
+ STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
if (iid == IID_IEnumPins || iid == IID_IUnknown) {
AddRef();
*object_ptr = static_cast<IEnumPins*>(this);
@@ -31,18 +28,18 @@ class PinEnumerator final
return E_NOINTERFACE;
}
- STDMETHOD_(ULONG, AddRef)() {
+ STDMETHOD_(ULONG, AddRef)() override {
base::RefCounted<PinEnumerator>::AddRef();
return 1;
}
- STDMETHOD_(ULONG, Release)() {
+ STDMETHOD_(ULONG, Release)() override {
base::RefCounted<PinEnumerator>::Release();
return 1;
}
// Implement IEnumPins.
- STDMETHOD(Next)(ULONG count, IPin** pins, ULONG* fetched) {
+ STDMETHOD(Next)(ULONG count, IPin** pins, ULONG* fetched) override {
ULONG pins_fetched = 0;
while (pins_fetched < count && filter_->NoOfPins() > index_) {
IPin* pin = filter_->GetPin(index_++);
@@ -56,7 +53,7 @@ class PinEnumerator final
return pins_fetched == count ? S_OK : S_FALSE;
}
- STDMETHOD(Skip)(ULONG count) {
+ STDMETHOD(Skip)(ULONG count) override {
if (filter_->NoOfPins()- index_ > count) {
index_ += count;
return S_OK;
@@ -65,13 +62,13 @@ class PinEnumerator final
return S_FALSE;
}
- STDMETHOD(Reset)() {
+ STDMETHOD(Reset)() override {
index_ = 0;
return S_OK;
}
- STDMETHOD(Clone)(IEnumPins** clone) {
- PinEnumerator* pin_enum = new PinEnumerator(filter_);
+ STDMETHOD(Clone)(IEnumPins** clone) override {
+ PinEnumerator* pin_enum = new PinEnumerator(filter_.get());
pin_enum->AddRef();
pin_enum->index_ = index_;
*clone = pin_enum;
@@ -79,6 +76,9 @@ class PinEnumerator final
}
private:
+ friend class base::RefCounted<PinEnumerator>;
+ ~PinEnumerator() {}
+
scoped_refptr<FilterBase> filter_;
size_t index_;
};
@@ -86,9 +86,6 @@ class PinEnumerator final
FilterBase::FilterBase() : state_(State_Stopped) {
}
-FilterBase::~FilterBase() {
-}
-
STDMETHODIMP FilterBase::EnumPins(IEnumPins** enum_pins) {
*enum_pins = new PinEnumerator(this);
(*enum_pins)->AddRef();
@@ -100,7 +97,7 @@ STDMETHODIMP FilterBase::FindPin(LPCWSTR id, IPin** pin) {
}
STDMETHODIMP FilterBase::QueryFilterInfo(FILTER_INFO* info) {
- info->pGraph = owning_graph_;
+ info->pGraph = owning_graph_.get();
info->achName[0] = L'\0';
if (info->pGraph)
info->pGraph->AddRef();
@@ -174,4 +171,7 @@ ULONG STDMETHODCALLTYPE FilterBase::Release() {
return 1;
}
+FilterBase::~FilterBase() {
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/win/filter_base_win.h b/chromium/media/video/capture/win/filter_base_win.h
index 9d5aa76f06d..25876edea41 100644
--- a/chromium/media/video/capture/win/filter_base_win.h
+++ b/chromium/media/video/capture/win/filter_base_win.h
@@ -22,7 +22,6 @@ class FilterBase
public base::RefCounted<FilterBase> {
public:
FilterBase();
- virtual ~FilterBase();
// Number of pins connected to this filter.
virtual size_t NoOfPins() = 0;
@@ -30,36 +29,40 @@ class FilterBase
virtual IPin* GetPin(int index) = 0;
// Inherited from IUnknown.
- STDMETHOD(QueryInterface)(REFIID id, void** object_ptr);
- STDMETHOD_(ULONG, AddRef)();
- STDMETHOD_(ULONG, Release)();
+ STDMETHOD(QueryInterface)(REFIID id, void** object_ptr) override;
+ STDMETHOD_(ULONG, AddRef)() override;
+ STDMETHOD_(ULONG, Release)() override;
// Inherited from IBaseFilter.
- STDMETHOD(EnumPins)(IEnumPins** enum_pins);
+ STDMETHOD(EnumPins)(IEnumPins** enum_pins) override;
- STDMETHOD(FindPin)(LPCWSTR id, IPin** pin);
+ STDMETHOD(FindPin)(LPCWSTR id, IPin** pin) override;
- STDMETHOD(QueryFilterInfo)(FILTER_INFO* info);
+ STDMETHOD(QueryFilterInfo)(FILTER_INFO* info) override;
- STDMETHOD(JoinFilterGraph)(IFilterGraph* graph, LPCWSTR name);
+ STDMETHOD(JoinFilterGraph)(IFilterGraph* graph, LPCWSTR name) override;
- STDMETHOD(QueryVendorInfo)(LPWSTR* vendor_info);
+ STDMETHOD(QueryVendorInfo)(LPWSTR* vendor_info) override;
// Inherited from IMediaFilter.
- STDMETHOD(Stop)();
+ STDMETHOD(Stop)() override;
- STDMETHOD(Pause)();
+ STDMETHOD(Pause)() override;
- STDMETHOD(Run)(REFERENCE_TIME start);
+ STDMETHOD(Run)(REFERENCE_TIME start) override;
- STDMETHOD(GetState)(DWORD msec_timeout, FILTER_STATE* state);
+ STDMETHOD(GetState)(DWORD msec_timeout, FILTER_STATE* state) override;
- STDMETHOD(SetSyncSource)(IReferenceClock* clock);
+ STDMETHOD(SetSyncSource)(IReferenceClock* clock) override;
- STDMETHOD(GetSyncSource)(IReferenceClock** clock);
+ STDMETHOD(GetSyncSource)(IReferenceClock** clock) override;
// Inherited from IPersistent.
- STDMETHOD(GetClassID)(CLSID* class_id) = 0;
+ STDMETHOD(GetClassID)(CLSID* class_id) override = 0;
+
+ protected:
+ friend class base::RefCounted<FilterBase>;
+ virtual ~FilterBase();
private:
FILTER_STATE state_;
diff --git a/chromium/media/video/capture/win/pin_base_win.cc b/chromium/media/video/capture/win/pin_base_win.cc
index 5f4611d0329..734072588a3 100644
--- a/chromium/media/video/capture/win/pin_base_win.cc
+++ b/chromium/media/video/capture/win/pin_base_win.cc
@@ -18,11 +18,8 @@ class TypeEnumerator final
index_(0) {
}
- ~TypeEnumerator() {
- }
-
// Implement from IUnknown.
- STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) {
+ STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
if (iid == IID_IEnumMediaTypes || iid == IID_IUnknown) {
AddRef();
*object_ptr = static_cast<IEnumMediaTypes*>(this);
@@ -31,18 +28,18 @@ class TypeEnumerator final
return E_NOINTERFACE;
}
- STDMETHOD_(ULONG, AddRef)() {
+ STDMETHOD_(ULONG, AddRef)() override {
base::RefCounted<TypeEnumerator>::AddRef();
return 1;
}
- STDMETHOD_(ULONG, Release)() {
+ STDMETHOD_(ULONG, Release)() override {
base::RefCounted<TypeEnumerator>::Release();
return 1;
}
// Implement IEnumMediaTypes.
- STDMETHOD(Next)(ULONG count, AM_MEDIA_TYPE** types, ULONG* fetched) {
+ STDMETHOD(Next)(ULONG count, AM_MEDIA_TYPE** types, ULONG* fetched) override {
ULONG types_fetched = 0;
while (types_fetched < count) {
@@ -81,18 +78,18 @@ class TypeEnumerator final
return types_fetched == count ? S_OK : S_FALSE;
}
- STDMETHOD(Skip)(ULONG count) {
+ STDMETHOD(Skip)(ULONG count) override {
index_ += count;
return S_OK;
}
- STDMETHOD(Reset)() {
+ STDMETHOD(Reset)() override {
index_ = 0;
return S_OK;
}
- STDMETHOD(Clone)(IEnumMediaTypes** clone) {
- TypeEnumerator* type_enum = new TypeEnumerator(pin_);
+ STDMETHOD(Clone)(IEnumMediaTypes** clone) override {
+ TypeEnumerator* type_enum = new TypeEnumerator(pin_.get());
type_enum->AddRef();
type_enum->index_ = index_;
*clone = type_enum;
@@ -100,6 +97,9 @@ class TypeEnumerator final
}
private:
+ friend class base::RefCounted<TypeEnumerator>;
+ ~TypeEnumerator() {}
+
void FreeAllocatedMediaTypes(ULONG allocated, AM_MEDIA_TYPE** types) {
for (ULONG i = 0; i < allocated; ++i) {
CoTaskMemFree(types[i]->pbFormat);
@@ -116,9 +116,6 @@ PinBase::PinBase(IBaseFilter* owner)
memset(&current_media_type_, 0, sizeof(current_media_type_));
}
-PinBase::~PinBase() {
-}
-
void PinBase::SetOwner(IBaseFilter* owner) {
owner_ = owner;
}
@@ -152,7 +149,7 @@ STDMETHODIMP PinBase::ReceiveConnection(IPin* connector,
}
STDMETHODIMP PinBase::Disconnect() {
- if (!connected_pin_)
+ if (!connected_pin_.get())
return S_FALSE;
connected_pin_.Release();
@@ -160,8 +157,8 @@ STDMETHODIMP PinBase::Disconnect() {
}
STDMETHODIMP PinBase::ConnectedTo(IPin** pin) {
- *pin = connected_pin_;
- if (!connected_pin_)
+ *pin = connected_pin_.get();
+ if (!connected_pin_.get())
return VFW_E_NOT_CONNECTED;
connected_pin_.get()->AddRef();
@@ -169,7 +166,7 @@ STDMETHODIMP PinBase::ConnectedTo(IPin** pin) {
}
STDMETHODIMP PinBase::ConnectionMediaType(AM_MEDIA_TYPE* media_type) {
- if (!connected_pin_)
+ if (!connected_pin_.get())
return VFW_E_NOT_CONNECTED;
*media_type = current_media_type_;
return S_OK;
@@ -287,4 +284,7 @@ STDMETHODIMP_(ULONG) PinBase::Release() {
return 1;
}
+PinBase::~PinBase() {
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/win/pin_base_win.h b/chromium/media/video/capture/win/pin_base_win.h
index 33f403f6f71..f531ec27bf7 100644
--- a/chromium/media/video/capture/win/pin_base_win.h
+++ b/chromium/media/video/capture/win/pin_base_win.h
@@ -23,7 +23,6 @@ class PinBase
public base::RefCounted<PinBase> {
public:
explicit PinBase(IBaseFilter* owner);
- virtual ~PinBase();
// Function used for changing the owner.
// If the owner is deleted the owner should first call this function
@@ -40,59 +39,65 @@ class PinBase
// Called when new media is received. Note that this is not on the same
// thread as where the pin is created.
- STDMETHOD(Receive)(IMediaSample* sample) = 0;
+ STDMETHOD(Receive)(IMediaSample* sample) override = 0;
- STDMETHOD(Connect)(IPin* receive_pin, const AM_MEDIA_TYPE* media_type);
+ STDMETHOD(Connect)(IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) override;
STDMETHOD(ReceiveConnection)(IPin* connector,
- const AM_MEDIA_TYPE* media_type);
+ const AM_MEDIA_TYPE* media_type) override;
- STDMETHOD(Disconnect)();
+ STDMETHOD(Disconnect)() override;
- STDMETHOD(ConnectedTo)(IPin** pin);
+ STDMETHOD(ConnectedTo)(IPin** pin) override;
- STDMETHOD(ConnectionMediaType)(AM_MEDIA_TYPE* media_type);
+ STDMETHOD(ConnectionMediaType)(AM_MEDIA_TYPE* media_type) override;
- STDMETHOD(QueryPinInfo)(PIN_INFO* info);
+ STDMETHOD(QueryPinInfo)(PIN_INFO* info) override;
- STDMETHOD(QueryDirection)(PIN_DIRECTION* pin_dir);
+ STDMETHOD(QueryDirection)(PIN_DIRECTION* pin_dir) override;
- STDMETHOD(QueryId)(LPWSTR* id);
+ STDMETHOD(QueryId)(LPWSTR* id) override;
- STDMETHOD(QueryAccept)(const AM_MEDIA_TYPE* media_type);
+ STDMETHOD(QueryAccept)(const AM_MEDIA_TYPE* media_type) override;
- STDMETHOD(EnumMediaTypes)(IEnumMediaTypes** types);
+ STDMETHOD(EnumMediaTypes)(IEnumMediaTypes** types) override;
- STDMETHOD(QueryInternalConnections)(IPin** pins, ULONG* no_pins);
+ STDMETHOD(QueryInternalConnections)(IPin** pins, ULONG* no_pins) override;
- STDMETHOD(EndOfStream)();
+ STDMETHOD(EndOfStream)() override;
- STDMETHOD(BeginFlush)();
+ STDMETHOD(BeginFlush)() override;
- STDMETHOD(EndFlush)();
+ STDMETHOD(EndFlush)() override;
STDMETHOD(NewSegment)(REFERENCE_TIME start,
REFERENCE_TIME stop,
- double dRate);
+ double dRate) override;
// Inherited from IMemInputPin.
- STDMETHOD(GetAllocator)(IMemAllocator** allocator);
+ STDMETHOD(GetAllocator)(IMemAllocator** allocator) override;
- STDMETHOD(NotifyAllocator)(IMemAllocator* allocator, BOOL read_only);
+ STDMETHOD(NotifyAllocator)(IMemAllocator* allocator, BOOL read_only) override;
- STDMETHOD(GetAllocatorRequirements)(ALLOCATOR_PROPERTIES* properties);
+ STDMETHOD(GetAllocatorRequirements)(
+ ALLOCATOR_PROPERTIES* properties) override;
STDMETHOD(ReceiveMultiple)(IMediaSample** samples,
long sample_count,
- long* processed);
- STDMETHOD(ReceiveCanBlock)();
+ long* processed) override;
+ STDMETHOD(ReceiveCanBlock)() override;
// Inherited from IUnknown.
- STDMETHOD(QueryInterface)(REFIID id, void** object_ptr);
+ STDMETHOD(QueryInterface)(REFIID id, void** object_ptr) override;
+
+ STDMETHOD_(ULONG, AddRef)() override;
- STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)() override;
- STDMETHOD_(ULONG, Release)();
+ protected:
+ friend class base::RefCounted<PinBase>;
+ virtual ~PinBase();
private:
AM_MEDIA_TYPE current_media_type_;
diff --git a/chromium/media/video/capture/win/sink_filter_win.cc b/chromium/media/video/capture/win/sink_filter_win.cc
index 8e9c941f6ee..664d25b48d2 100644
--- a/chromium/media/video/capture/win/sink_filter_win.cc
+++ b/chromium/media/video/capture/win/sink_filter_win.cc
@@ -31,12 +31,10 @@ SinkFilter::SinkFilter(SinkFilterObserver* observer)
input_pin_ = new SinkInputPin(this, observer);
}
-SinkFilter::~SinkFilter() {
- input_pin_->SetOwner(NULL);
-}
-
-void SinkFilter::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
- input_pin_->SetRequestedMediaFormat(format);
+void SinkFilter::SetRequestedMediaFormat(VideoPixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header) {
+ input_pin_->SetRequestedMediaFormat(pixel_format, frame_rate, info_header);
}
const VideoCaptureFormat& SinkFilter::ResultingFormat() {
@@ -48,7 +46,7 @@ size_t SinkFilter::NoOfPins() {
}
IPin* SinkFilter::GetPin(int index) {
- return index == 0 ? input_pin_ : NULL;
+ return index == 0 ? input_pin_.get() : NULL;
}
STDMETHODIMP SinkFilter::GetClassID(CLSID* clsid) {
@@ -56,4 +54,8 @@ STDMETHODIMP SinkFilter::GetClassID(CLSID* clsid) {
return S_OK;
}
+SinkFilter::~SinkFilter() {
+ input_pin_->SetOwner(NULL);
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/win/sink_filter_win.h b/chromium/media/video/capture/win/sink_filter_win.h
index 7265de578e6..df11eba6d71 100644
--- a/chromium/media/video/capture/win/sink_filter_win.h
+++ b/chromium/media/video/capture/win/sink_filter_win.h
@@ -11,8 +11,8 @@
#include <windows.h>
#include "base/memory/ref_counted.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
#include "media/video/capture/win/filter_base_win.h"
#include "media/video/capture/win/sink_filter_observer_win.h"
@@ -35,20 +35,23 @@ class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3"))
SinkFilter : public FilterBase {
public:
explicit SinkFilter(SinkFilterObserver* observer);
- virtual ~SinkFilter();
- void SetRequestedMediaFormat(const VideoCaptureFormat& format);
+ void SetRequestedMediaFormat(VideoPixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header);
// Returns the format that is negotiated when this
// filter is connected to a media filter.
const VideoCaptureFormat& ResultingFormat();
// Implement FilterBase.
- virtual size_t NoOfPins();
- virtual IPin* GetPin(int index);
+ size_t NoOfPins() override;
+ IPin* GetPin(int index) override;
- STDMETHOD(GetClassID)(CLSID* clsid);
+ STDMETHOD(GetClassID)(CLSID* clsid) override;
private:
+ ~SinkFilter() override;
+
scoped_refptr<SinkInputPin> input_pin_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SinkFilter);
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.cc b/chromium/media/video/capture/win/sink_input_pin_win.cc
index 0126e13db8f..bc08809a671 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.cc
+++ b/chromium/media/video/capture/win/sink_input_pin_win.cc
@@ -16,13 +16,86 @@ namespace media {
const REFERENCE_TIME kSecondsToReferenceTime = 10000000;
+
+static DWORD GetArea(const BITMAPINFOHEADER& info_header) {
+ return info_header.biWidth * info_header.biHeight;
+}
+
SinkInputPin::SinkInputPin(IBaseFilter* filter,
SinkFilterObserver* observer)
- : observer_(observer),
+ : requested_frame_rate_(0),
+ observer_(observer),
PinBase(filter) {
}
-SinkInputPin::~SinkInputPin() {}
+void SinkInputPin::SetRequestedMediaFormat(
+ VideoPixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header) {
+ requested_pixel_format_ = pixel_format;
+ requested_frame_rate_ = frame_rate;
+ requested_info_header_ = info_header;
+ resulting_format_.frame_size.SetSize(0, 0);
+ resulting_format_.frame_rate = 0;
+ resulting_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
+}
+
+const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
+ return resulting_format_;
+}
+
+bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
+ GUID type = media_type->majortype;
+ if (type != MEDIATYPE_Video)
+ return false;
+
+ GUID format_type = media_type->formattype;
+ if (format_type != FORMAT_VideoInfo)
+ return false;
+
+ // Check for the sub types we support.
+ GUID sub_type = media_type->subtype;
+ VIDEOINFOHEADER* pvi =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ if (pvi == NULL)
+ return false;
+
+ // Store the incoming width and height.
+ resulting_format_.frame_size.SetSize(pvi->bmiHeader.biWidth,
+ abs(pvi->bmiHeader.biHeight));
+ if (pvi->AvgTimePerFrame > 0) {
+ resulting_format_.frame_rate =
+ static_cast<int>(kSecondsToReferenceTime / pvi->AvgTimePerFrame);
+ } else {
+ resulting_format_.frame_rate = requested_frame_rate_;
+ }
+ if (sub_type == kMediaSubTypeI420 &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_I420;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_YUY2 &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_YUY2;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_MJPG &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_MJPEG;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_RGB24 &&
+ pvi->bmiHeader.biCompression == BI_RGB) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_RGB24;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_RGB32 &&
+ pvi->bmiHeader.biCompression == BI_RGB) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_RGB32;
+ return true;
+ }
+ return false;
+}
bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
if (media_type->cbFormat < sizeof(VIDEOINFOHEADER))
@@ -36,44 +109,63 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
- if (requested_format_.frame_rate > 0) {
+ if (requested_frame_rate_ > 0) {
pvi->AvgTimePerFrame =
- kSecondsToReferenceTime / requested_format_.frame_rate;
+ kSecondsToReferenceTime / requested_frame_rate_;
}
media_type->majortype = MEDIATYPE_Video;
media_type->formattype = FORMAT_VideoInfo;
media_type->bTemporalCompression = FALSE;
+ if (requested_pixel_format_ == PIXEL_FORMAT_MJPEG) {
+ // If the requested pixel format is MJPEG, accept only MJPEG.
+ // This is ok since the capabilities of the capturer have been
+ // enumerated and we know that it is supported.
+ if (index != 0)
+ return false;
+
+ pvi->bmiHeader = requested_info_header_;
+ return true;
+ }
+
switch (index) {
case 0: {
pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
pvi->bmiHeader.biBitCount = 12; // bit per pixel
- pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
- pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
- pvi->bmiHeader.biSizeImage =
- requested_format_.frame_size.GetArea() * 3 / 2;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 3 / 2;
media_type->subtype = kMediaSubTypeI420;
break;
}
case 1: {
pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
pvi->bmiHeader.biBitCount = 16;
- pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
- pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
- pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 2;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 2;
media_type->subtype = MEDIASUBTYPE_YUY2;
break;
}
case 2: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24;
- pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
- pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
- pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 3;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 3;
media_type->subtype = MEDIASUBTYPE_RGB24;
break;
}
+ case 3: {
+ pvi->bmiHeader.biCompression = BI_RGB;
+ pvi->bmiHeader.biBitCount = 32;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 4;
+ media_type->subtype = MEDIASUBTYPE_RGB32;
+ break;
+ }
default:
return false;
}
@@ -83,52 +175,15 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
return true;
}
-bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
- GUID type = media_type->majortype;
- if (type != MEDIATYPE_Video)
- return false;
-
- GUID format_type = media_type->formattype;
- if (format_type != FORMAT_VideoInfo)
- return false;
-
- // Check for the sub types we support.
- GUID sub_type = media_type->subtype;
- VIDEOINFOHEADER* pvi =
- reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- if (pvi == NULL)
- return false;
-
- // Store the incoming width and height.
- resulting_format_.frame_size.SetSize(pvi->bmiHeader.biWidth,
- abs(pvi->bmiHeader.biHeight));
- if (pvi->AvgTimePerFrame > 0) {
- resulting_format_.frame_rate =
- static_cast<int>(kSecondsToReferenceTime / pvi->AvgTimePerFrame);
- } else {
- resulting_format_.frame_rate = requested_format_.frame_rate;
- }
- if (sub_type == kMediaSubTypeI420 &&
- pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
- resulting_format_.pixel_format = PIXEL_FORMAT_I420;
- return true; // This format is acceptable.
- }
- if (sub_type == MEDIASUBTYPE_YUY2 &&
- pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
- resulting_format_.pixel_format = PIXEL_FORMAT_YUY2;
- return true; // This format is acceptable.
- }
- if (sub_type == MEDIASUBTYPE_RGB24 &&
- pvi->bmiHeader.biCompression == BI_RGB) {
- resulting_format_.pixel_format = PIXEL_FORMAT_RGB24;
- return true; // This format is acceptable.
- }
- return false;
-}
-
HRESULT SinkInputPin::Receive(IMediaSample* sample) {
const int length = sample->GetActualDataLength();
uint8* buffer = NULL;
+
+ if (length <= 0) {
+ DLOG(WARNING) << "Media sample length is 0 or less.";
+ return S_FALSE;
+ }
+
if (FAILED(sample->GetPointer(&buffer)))
return S_FALSE;
@@ -136,15 +191,6 @@ HRESULT SinkInputPin::Receive(IMediaSample* sample) {
return S_OK;
}
-void SinkInputPin::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
- requested_format_ = format;
- resulting_format_.frame_size.SetSize(0, 0);
- resulting_format_.frame_rate = 0;
- resulting_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
-}
-
-const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
- return resulting_format_;
-}
+SinkInputPin::~SinkInputPin() {}
} // namespace media
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.h b/chromium/media/video/capture/win/sink_input_pin_win.h
index f14ca33073c..869d001fd5a 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.h
+++ b/chromium/media/video/capture/win/sink_input_pin_win.h
@@ -8,8 +8,8 @@
#ifndef MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
#define MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
#include "media/video/capture/win/pin_base_win.h"
#include "media/video/capture/win/sink_filter_win.h"
@@ -22,21 +22,26 @@ extern const REFERENCE_TIME kSecondsToReferenceTime;
class SinkInputPin : public PinBase {
public:
SinkInputPin(IBaseFilter* filter, SinkFilterObserver* observer);
- virtual ~SinkInputPin();
- void SetRequestedMediaFormat(const VideoCaptureFormat& format);
+ void SetRequestedMediaFormat(VideoPixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header);
// Returns the capability that is negotiated when this
// pin is connected to a media filter.
const VideoCaptureFormat& ResultingFormat();
// Implement PinBase.
- virtual bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type);
- virtual bool GetValidMediaType(int index, AM_MEDIA_TYPE* media_type);
+ bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) override;
+ bool GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) override;
- STDMETHOD(Receive)(IMediaSample* media_sample);
+ STDMETHOD(Receive)(IMediaSample* media_sample) override;
private:
- VideoCaptureFormat requested_format_;
+ ~SinkInputPin() override;
+
+ VideoPixelFormat requested_pixel_format_;
+ float requested_frame_rate_;
+ BITMAPINFOHEADER requested_info_header_;
VideoCaptureFormat resulting_format_;
SinkFilterObserver* observer_;
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.cc b/chromium/media/video/capture/win/video_capture_device_factory_win.cc
index cc374a4042e..7f0d665e4ab 100644
--- a/chromium/media/video/capture/win/video_capture_device_factory_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_factory_win.cc
@@ -101,7 +101,7 @@ static bool CreateVideoCaptureDeviceMediaFoundation(const char* sym_link,
attributes->SetString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK,
base::SysUTF8ToWide(sym_link).c_str());
- return SUCCEEDED(MFCreateDeviceSource(attributes, source));
+ return SUCCEEDED(MFCreateDeviceSource(attributes.get(), source));
}
static bool EnumerateVideoDevicesMediaFoundation(IMFActivate*** devices,
@@ -110,7 +110,7 @@ static bool EnumerateVideoDevicesMediaFoundation(IMFActivate*** devices,
if (!PrepareVideoCaptureAttributesMediaFoundation(attributes.Receive(), 1))
return false;
- return SUCCEEDED(MFEnumDeviceSources(attributes, devices, count));
+ return SUCCEEDED(MFEnumDeviceSources(attributes.get(), devices, count));
}
static bool IsDeviceBlackListed(const std::string& name) {
@@ -127,10 +127,7 @@ static bool IsDeviceBlackListed(const std::string& name) {
return false;
}
-static void GetDeviceNamesDirectShow(
- const CLSID& class_id,
- const Name::CaptureApiType capture_api_type,
- Names* device_names) {
+static void GetDeviceNamesDirectShow(Names* device_names) {
DCHECK(device_names);
DVLOG(1) << " GetDeviceNamesDirectShow";
@@ -141,7 +138,8 @@ static void GetDeviceNamesDirectShow(
return;
ScopedComPtr<IEnumMoniker> enum_moniker;
- hr = dev_enum->CreateClassEnumerator(class_id, enum_moniker.Receive(), 0);
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
// CreateClassEnumerator returns S_FALSE on some Windows OS
// when no camera exist. Therefore the FAILED macro can't be used.
if (hr != S_OK)
@@ -165,7 +163,7 @@ static void GetDeviceNamesDirectShow(
if (FAILED(hr) || name.type() != VT_BSTR)
continue;
- const std::string device_name(base::SysWideToUTF8(V_BSTR(&name)));
+ const std::string device_name(base::SysWideToUTF8(V_BSTR(name.ptr())));
if (IsDeviceBlackListed(device_name))
continue;
@@ -176,9 +174,9 @@ static void GetDeviceNamesDirectShow(
id = device_name;
} else {
DCHECK_EQ(name.type(), VT_BSTR);
- id = base::SysWideToUTF8(V_BSTR(&name));
+ id = base::SysWideToUTF8(V_BSTR(name.ptr()));
}
- device_names->push_back(Name(device_name, id, capture_api_type));
+ device_names->push_back(Name(device_name, id, Name::DIRECT_SHOW));
}
}
@@ -237,18 +235,16 @@ static void GetDeviceSupportedFormatsDirectShow(const Name& device,
hr = VideoCaptureDeviceWin::GetDeviceFilter(device.capabilities_id(),
CLSID_VideoInputDeviceCategory,
capture_filter.Receive());
- if (!capture_filter) {
+ if (!capture_filter.get()) {
DLOG(ERROR) << "Failed to create capture filter: "
<< logging::SystemErrorCodeToString(hr);
return;
}
base::win::ScopedComPtr<IPin> output_capture_pin(
- VideoCaptureDeviceWin::GetPin(capture_filter,
- PINDIR_OUTPUT,
- PIN_CATEGORY_CAPTURE,
- GUID_NULL));
- if (!output_capture_pin) {
+ VideoCaptureDeviceWin::GetPin(capture_filter.get(), PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE, GUID_NULL));
+ if (!output_capture_pin.get()) {
DLOG(ERROR) << "Failed to get capture output pin";
return;
}
@@ -315,7 +311,7 @@ static void GetDeviceSupportedFormatsMediaFoundation(
base::win::ScopedComPtr<IMFSourceReader> reader;
HRESULT hr =
- MFCreateSourceReaderFromMediaSource(source, NULL, reader.Receive());
+ MFCreateSourceReaderFromMediaSource(source.get(), NULL, reader.Receive());
if (FAILED(hr)) {
DLOG(ERROR) << "MFCreateSourceReaderFromMediaSource failed: "
<< logging::SystemErrorCodeToString(hr);
@@ -327,7 +323,7 @@ static void GetDeviceSupportedFormatsMediaFoundation(
while (SUCCEEDED(reader->GetNativeMediaType(
kFirstVideoStream, stream_index, type.Receive()))) {
UINT32 width, height;
- hr = MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width, &height);
+ hr = MFGetAttributeSize(type.get(), MF_MT_FRAME_SIZE, &width, &height);
if (FAILED(hr)) {
DLOG(ERROR) << "MFGetAttributeSize failed: "
<< logging::SystemErrorCodeToString(hr);
@@ -337,7 +333,8 @@ static void GetDeviceSupportedFormatsMediaFoundation(
capture_format.frame_size.SetSize(width, height);
UINT32 numerator, denominator;
- hr = MFGetAttributeRatio(type, MF_MT_FRAME_RATE, &numerator, &denominator);
+ hr = MFGetAttributeRatio(type.get(), MF_MT_FRAME_RATE, &numerator,
+ &denominator);
if (FAILED(hr)) {
DLOG(ERROR) << "MFGetAttributeSize failed: "
<< logging::SystemErrorCodeToString(hr);
@@ -384,14 +381,13 @@ VideoCaptureDeviceFactoryWin::VideoCaptureDeviceFactoryWin() {
// DirectShow for any other versions, unless forced via flag. Media Foundation
// can also be forced if appropriate flag is set and we are in Windows 7 or
// 8 in non-Metro mode.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
use_media_foundation_ = (base::win::IsMetroProcess() &&
!cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) ||
(base::win::GetVersion() >= base::win::VERSION_WIN7 &&
cmd_line->HasSwitch(switches::kForceMediaFoundationVideoCapture));
}
-
scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
const Name& device_name) {
DCHECK(thread_checker_.CalledOnValidThread());
@@ -408,8 +404,7 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
if (!static_cast<VideoCaptureDeviceMFWin*>(device.get())->Init(source))
device.reset();
} else {
- DCHECK(device_name.capture_api_type() == Name::DIRECT_SHOW ||
- device_name.capture_api_type() == Name::DIRECT_SHOW_WDM_CROSSBAR);
+ DCHECK(device_name.capture_api_type() == Name::DIRECT_SHOW);
device.reset(new VideoCaptureDeviceWin(device_name));
DVLOG(1) << " DirectShow Device: " << device_name.name();
if (!static_cast<VideoCaptureDeviceWin*>(device.get())->Init())
@@ -423,29 +418,7 @@ void VideoCaptureDeviceFactoryWin::GetDeviceNames(Names* device_names) {
if (use_media_foundation_) {
GetDeviceNamesMediaFoundation(device_names);
} else {
- GetDeviceNamesDirectShow(CLSID_VideoInputDeviceCategory,
- Name::DIRECT_SHOW,
- device_names);
-
- Names crossbar_device_names;
- GetDeviceNamesDirectShow(AM_KSCATEGORY_CROSSBAR,
- Name::DIRECT_SHOW_WDM_CROSSBAR,
- &crossbar_device_names);
- // Search in the listed |device_names| to find a device with matching USB ID
- // to each device in |crossbar_device_names|.
- for (Names::iterator crossbar_device_it = crossbar_device_names.begin();
- crossbar_device_it != crossbar_device_names.end();
- ++crossbar_device_it) {
- const std::string& crossbar_device_model = crossbar_device_it->GetModel();
- for (Names::const_iterator device_it = device_names->begin();
- device_it != device_names->end(); ++device_it) {
- if (crossbar_device_model == device_it->GetModel()) {
- crossbar_device_it->set_capabilities_id(device_it->id());
- device_names->push_back(*crossbar_device_it);
- break;
- }
- }
- }
+ GetDeviceNamesDirectShow(device_names);
}
}
@@ -459,4 +432,11 @@ void VideoCaptureDeviceFactoryWin::GetDeviceSupportedFormats(
GetDeviceSupportedFormatsDirectShow(device, formats);
}
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryWin();
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.h b/chromium/media/video/capture/win/video_capture_device_factory_win.h
index 849e1ad4208..f76fe63db08 100644
--- a/chromium/media/video/capture/win/video_capture_device_factory_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_factory_win.h
@@ -19,12 +19,12 @@ class MEDIA_EXPORT VideoCaptureDeviceFactoryWin :
static bool PlatformSupportsMediaFoundation();
VideoCaptureDeviceFactoryWin();
- virtual ~VideoCaptureDeviceFactoryWin() {}
+ ~VideoCaptureDeviceFactoryWin() override {}
- virtual scoped_ptr<VideoCaptureDevice> Create(
+ scoped_ptr<VideoCaptureDevice> Create(
const VideoCaptureDevice::Name& device_name) override;
- virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
- virtual void GetDeviceSupportedFormats(
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
const VideoCaptureDevice::Name& device,
VideoCaptureFormats* supported_formats) override;
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
index a222d1ce3b1..95ade7161e4 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
@@ -66,7 +66,7 @@ HRESULT FillCapabilities(IMFSourceReader* source,
while (SUCCEEDED(hr = source->GetNativeMediaType(
kFirstVideoStream, stream_index, type.Receive()))) {
VideoCaptureFormat format;
- if (FillFormat(type, &format))
+ if (FillFormat(type.get(), &format))
capabilities->emplace_back(stream_index, format);
type.Release();
++stream_index;
@@ -91,7 +91,7 @@ class MFReaderCallback final
wait_event_ = event;
}
- STDMETHOD(QueryInterface)(REFIID riid, void** object) {
+ STDMETHOD(QueryInterface)(REFIID riid, void** object) override {
if (riid != IID_IUnknown && riid != IID_IMFSourceReaderCallback)
return E_NOINTERFACE;
*object = static_cast<IMFSourceReaderCallback*>(this);
@@ -99,18 +99,21 @@ class MFReaderCallback final
return S_OK;
}
- STDMETHOD_(ULONG, AddRef)() {
+ STDMETHOD_(ULONG, AddRef)() override {
base::RefCountedThreadSafe<MFReaderCallback>::AddRef();
return 1U;
}
- STDMETHOD_(ULONG, Release)() {
+ STDMETHOD_(ULONG, Release)() override {
base::RefCountedThreadSafe<MFReaderCallback>::Release();
return 1U;
}
- STDMETHOD(OnReadSample)(HRESULT status, DWORD stream_index,
- DWORD stream_flags, LONGLONG time_stamp, IMFSample* sample) {
+ STDMETHOD(OnReadSample)(HRESULT status,
+ DWORD stream_index,
+ DWORD stream_flags,
+ LONGLONG time_stamp,
+ IMFSample* sample) override {
base::TimeTicks stamp(base::TimeTicks::Now());
if (!sample) {
observer_->OnIncomingCapturedData(NULL, 0, 0, stamp);
@@ -123,7 +126,7 @@ class MFReaderCallback final
for (DWORD i = 0; i < count; ++i) {
ScopedComPtr<IMFMediaBuffer> buffer;
sample->GetBufferByIndex(i, buffer.Receive());
- if (buffer) {
+ if (buffer.get()) {
DWORD length = 0, max_length = 0;
BYTE* data = NULL;
buffer->Lock(&data, &max_length, &length);
@@ -134,7 +137,7 @@ class MFReaderCallback final
return S_OK;
}
- STDMETHOD(OnFlush)(DWORD stream_index) {
+ STDMETHOD(OnFlush)(DWORD stream_index) override {
if (wait_event_) {
wait_event_->Signal();
wait_event_ = NULL;
@@ -142,7 +145,7 @@ class MFReaderCallback final
return S_OK;
}
- STDMETHOD(OnEvent)(DWORD stream_index, IMFMediaEvent* event) {
+ STDMETHOD(OnEvent)(DWORD stream_index, IMFMediaEvent* event) override {
NOTIMPLEMENTED();
return S_OK;
}
@@ -213,17 +216,17 @@ VideoCaptureDeviceMFWin::~VideoCaptureDeviceMFWin() {
bool VideoCaptureDeviceMFWin::Init(
const base::win::ScopedComPtr<IMFMediaSource>& source) {
DCHECK(CalledOnValidThread());
- DCHECK(!reader_);
+ DCHECK(!reader_.get());
ScopedComPtr<IMFAttributes> attributes;
MFCreateAttributes(attributes.Receive(), 1);
- DCHECK(attributes);
+ DCHECK(attributes.get());
callback_ = new MFReaderCallback(this);
attributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, callback_.get());
- return SUCCEEDED(MFCreateSourceReaderFromMediaSource(source, attributes,
- reader_.Receive()));
+ return SUCCEEDED(MFCreateSourceReaderFromMediaSource(
+ source.get(), attributes.get(), reader_.Receive()));
}
void VideoCaptureDeviceMFWin::AllocateAndStart(
@@ -238,8 +241,8 @@ void VideoCaptureDeviceMFWin::AllocateAndStart(
CapabilityList capabilities;
HRESULT hr = S_OK;
- if (reader_) {
- hr = FillCapabilities(reader_, &capabilities);
+ if (reader_.get()) {
+ hr = FillCapabilities(reader_.get(), &capabilities);
if (SUCCEEDED(hr)) {
const CapabilityWin found_capability =
GetBestMatchedCapability(params.requested_format, capabilities);
@@ -247,7 +250,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStart(
hr = reader_->GetNativeMediaType(
kFirstVideoStream, found_capability.stream_index, type.Receive());
if (SUCCEEDED(hr)) {
- hr = reader_->SetCurrentMediaType(kFirstVideoStream, NULL, type);
+ hr = reader_->SetCurrentMediaType(kFirstVideoStream, NULL, type.get());
if (SUCCEEDED(hr)) {
hr = reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL,
NULL);
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/video/capture/win/video_capture_device_mf_win.h
index fc11d19289a..eeb7edf8a4c 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.h
@@ -36,16 +36,15 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
static bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format);
explicit VideoCaptureDeviceMFWin(const Name& device_name);
- virtual ~VideoCaptureDeviceMFWin();
+ ~VideoCaptureDeviceMFWin() override;
// Opens the device driver for this device.
bool Init(const base::win::ScopedComPtr<IMFMediaSource>& source);
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client)
- override;
- virtual void StopAndDeAllocate() override;
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
// Captured new video data.
void OnIncomingCapturedData(const uint8* data,
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/video/capture/win/video_capture_device_win.cc
index 83e1bd3919a..320cba2a72f 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_win.cc
@@ -58,8 +58,8 @@ HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
DCHECK(filter);
ScopedComPtr<ICreateDevEnum> dev_enum;
- HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
- CLSCTX_INPROC);
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
if (FAILED(hr))
return hr;
@@ -85,15 +85,16 @@ HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
// Find the device via DevicePath, Description or FriendlyName, whichever is
// available first.
static const wchar_t* kPropertyNames[] = {
- L"DevicePath", L"Description", L"FriendlyName"
- };
+ L"DevicePath", L"Description", L"FriendlyName"};
+
ScopedVariant name;
- for (size_t i = 0;
- i < arraysize(kPropertyNames) && name.type() != VT_BSTR; ++i) {
- prop_bag->Read(kPropertyNames[i], name.Receive(), 0);
+ for (const auto* property_name : kPropertyNames) {
+ if (name.type() != VT_BSTR)
+ prop_bag->Read(property_name, name.Receive(), 0);
}
+
if (name.type() == VT_BSTR) {
- std::string device_path(base::SysWideToUTF8(V_BSTR(&name)));
+ std::string device_path(base::SysWideToUTF8(V_BSTR(name.ptr())));
if (device_path.compare(device_id) == 0) {
// We have found the requested device
hr = moniker->BindToObject(0, 0, IID_IBaseFilter,
@@ -123,7 +124,7 @@ ScopedComPtr<IPin> VideoCaptureDeviceWin::GetPin(IBaseFilter* filter,
ScopedComPtr<IPin> pin;
ScopedComPtr<IEnumPins> pin_enum;
HRESULT hr = filter->EnumPins(pin_enum.Receive());
- if (pin_enum == NULL)
+ if (pin_enum.get() == NULL)
return pin;
// Get first unconnected pin.
@@ -132,15 +133,16 @@ ScopedComPtr<IPin> VideoCaptureDeviceWin::GetPin(IBaseFilter* filter,
PIN_DIRECTION this_pin_dir = static_cast<PIN_DIRECTION>(-1);
hr = pin->QueryDirection(&this_pin_dir);
if (pin_dir == this_pin_dir) {
- if ((category == GUID_NULL || PinMatchesCategory(pin, category)) &&
- (major_type == GUID_NULL || PinMatchesMajorType(pin, major_type))) {
+ if ((category == GUID_NULL || PinMatchesCategory(pin.get(), category)) &&
+ (major_type == GUID_NULL ||
+ PinMatchesMajorType(pin.get(), major_type))) {
return pin;
}
}
pin.Release();
}
- DCHECK(!pin);
+ DCHECK(!pin.get());
return pin;
}
@@ -151,14 +153,14 @@ VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
const GUID& sub_type;
VideoPixelFormat format;
} pixel_formats[] = {
- { kMediaSubTypeI420, PIXEL_FORMAT_I420 },
- { MEDIASUBTYPE_IYUV, PIXEL_FORMAT_I420 },
- { MEDIASUBTYPE_RGB24, PIXEL_FORMAT_RGB24 },
- { MEDIASUBTYPE_YUY2, PIXEL_FORMAT_YUY2 },
- { MEDIASUBTYPE_MJPG, PIXEL_FORMAT_MJPEG },
- { MEDIASUBTYPE_UYVY, PIXEL_FORMAT_UYVY },
- { MEDIASUBTYPE_ARGB32, PIXEL_FORMAT_ARGB },
- { kMediaSubTypeHDYC, PIXEL_FORMAT_UYVY },
+ {kMediaSubTypeI420, PIXEL_FORMAT_I420},
+ {MEDIASUBTYPE_IYUV, PIXEL_FORMAT_I420},
+ {MEDIASUBTYPE_RGB24, PIXEL_FORMAT_RGB24},
+ {MEDIASUBTYPE_YUY2, PIXEL_FORMAT_YUY2},
+ {MEDIASUBTYPE_MJPG, PIXEL_FORMAT_MJPEG},
+ {MEDIASUBTYPE_UYVY, PIXEL_FORMAT_UYVY},
+ {MEDIASUBTYPE_ARGB32, PIXEL_FORMAT_ARGB},
+ {kMediaSubTypeHDYC, PIXEL_FORMAT_UYVY},
};
for (size_t i = 0; i < arraysize(pixel_formats); ++i) {
if (sub_type == pixel_formats[i].sub_type)
@@ -177,7 +179,7 @@ void VideoCaptureDeviceWin::ScopedMediaType::Free() {
return;
DeleteMediaType(media_type_);
- media_type_= NULL;
+ media_type_ = NULL;
}
AM_MEDIA_TYPE** VideoCaptureDeviceWin::ScopedMediaType::Receive() {
@@ -212,59 +214,52 @@ void VideoCaptureDeviceWin::ScopedMediaType::DeleteMediaType(
}
VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
- : device_name_(device_name),
- state_(kIdle) {
+ : device_name_(device_name), state_(kIdle) {
DetachFromThread();
}
VideoCaptureDeviceWin::~VideoCaptureDeviceWin() {
DCHECK(CalledOnValidThread());
- if (media_control_)
+ if (media_control_.get())
media_control_->Stop();
- if (graph_builder_) {
- if (sink_filter_) {
- graph_builder_->RemoveFilter(sink_filter_);
+ if (graph_builder_.get()) {
+ if (sink_filter_.get()) {
+ graph_builder_->RemoveFilter(sink_filter_.get());
sink_filter_ = NULL;
}
- if (capture_filter_)
- graph_builder_->RemoveFilter(capture_filter_);
-
- if (mjpg_filter_)
- graph_builder_->RemoveFilter(mjpg_filter_);
-
- if (crossbar_filter_)
- graph_builder_->RemoveFilter(crossbar_filter_);
+ if (capture_filter_.get())
+ graph_builder_->RemoveFilter(capture_filter_.get());
}
+
+ if (capture_graph_builder_.get())
+ capture_graph_builder_.Release();
}
bool VideoCaptureDeviceWin::Init() {
DCHECK(CalledOnValidThread());
HRESULT hr;
- if (device_name_.capture_api_type() == Name::DIRECT_SHOW_WDM_CROSSBAR) {
- hr = InstantiateWDMFiltersAndPins();
- } else {
- hr = GetDeviceFilter(device_name_.id(), CLSID_VideoInputDeviceCategory,
- capture_filter_.Receive());
- }
- if (!capture_filter_) {
+ hr = GetDeviceFilter(device_name_.id(), CLSID_VideoInputDeviceCategory,
+ capture_filter_.Receive());
+
+ if (!capture_filter_.get()) {
DLOG(ERROR) << "Failed to create capture filter: "
<< logging::SystemErrorCodeToString(hr);
return false;
}
- output_capture_pin_ =
- GetPin(capture_filter_, PINDIR_OUTPUT, PIN_CATEGORY_CAPTURE, GUID_NULL);
- if (!output_capture_pin_) {
+ output_capture_pin_ = GetPin(capture_filter_.get(), PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE, GUID_NULL);
+ if (!output_capture_pin_.get()) {
DLOG(ERROR) << "Failed to get capture output pin";
return false;
}
// Create the sink filter used for receiving Captured frames.
sink_filter_ = new SinkFilter(this);
- if (sink_filter_ == NULL) {
+ if (sink_filter_.get() == NULL) {
DLOG(ERROR) << "Failed to create send filter";
return false;
}
@@ -279,33 +274,61 @@ bool VideoCaptureDeviceWin::Init() {
return false;
}
- hr = graph_builder_.QueryInterface(media_control_.Receive());
+ hr = capture_graph_builder_.CreateInstance(CLSID_CaptureGraphBuilder2, NULL,
+ CLSCTX_INPROC);
if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to create media control builder: "
+ DLOG(ERROR) << "Failed to create the Capture Graph Builder: "
<< logging::SystemErrorCodeToString(hr);
return false;
}
- hr = graph_builder_->AddFilter(capture_filter_, NULL);
+ hr = capture_graph_builder_->SetFiltergraph(graph_builder_.get());
if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to add the capture device to the graph: "
+ DLOG(ERROR) << "Failed to give graph to capture graph builder: "
<< logging::SystemErrorCodeToString(hr);
return false;
}
- if (device_name_.capture_api_type() == Name::DIRECT_SHOW_WDM_CROSSBAR &&
- FAILED(AddWDMCrossbarFilterToGraphAndConnect())) {
- DLOG(ERROR) << "Failed to add the WDM Crossbar filter to the graph.";
+ hr = graph_builder_.QueryInterface(media_control_.Receive());
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to create media control builder: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = graph_builder_->AddFilter(capture_filter_.get(), NULL);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to add the capture device to the graph: "
+ << logging::SystemErrorCodeToString(hr);
return false;
}
- hr = graph_builder_->AddFilter(sink_filter_, NULL);
+ hr = graph_builder_->AddFilter(sink_filter_.get(), NULL);
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to add the send filter to the graph: "
<< logging::SystemErrorCodeToString(hr);
return false;
}
+ // The following code builds the upstream portions of the graph,
+ // for example if a capture device uses a Windows Driver Model (WDM)
+ // driver, the graph may require certain filters upstream from the
+ // WDM Video Capture filter, such as a TV Tuner filter or an Analog
+ // Video Crossbar filter. We try using the more prevalent
+ // MEDIATYPE_Interleaved first.
+ base::win::ScopedComPtr<IAMStreamConfig> stream_config;
+
+ hr = capture_graph_builder_->FindInterface(
+ &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Interleaved, capture_filter_.get(),
+ IID_IAMStreamConfig, (void**)stream_config.Receive());
+ if (FAILED(hr)) {
+ hr = capture_graph_builder_->FindInterface(
+ &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, capture_filter_.get(),
+ IID_IAMStreamConfig, (void**)stream_config.Receive());
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to find CapFilter:IAMStreamConfig: "
+ << logging::SystemErrorCodeToString(hr);
+ }
+
return CreateCapabilityMap();
}
@@ -321,12 +344,11 @@ void VideoCaptureDeviceWin::AllocateAndStart(
// Get the camera capability that best match the requested format.
const CapabilityWin found_capability =
GetBestMatchedCapability(params.requested_format, capabilities_);
- VideoCaptureFormat format = found_capability.supported_format;
// Reduce the frame rate if the requested frame rate is lower
// than the capability.
- format.frame_rate =
- std::min(format.frame_rate, params.requested_format.frame_rate);
+ float frame_rate = std::min(found_capability.supported_format.frame_rate,
+ params.requested_format.frame_rate);
ScopedComPtr<IAMStreamConfig> stream_config;
HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
@@ -348,64 +370,40 @@ void VideoCaptureDeviceWin::AllocateAndStart(
// Get the windows capability from the capture device.
// GetStreamCaps can return S_FALSE which we consider an error. Therefore the
// FAILED macro can't be used.
- hr = stream_config->GetStreamCaps(
- found_capability.stream_index, media_type.Receive(), caps.get());
+ hr = stream_config->GetStreamCaps(found_capability.stream_index,
+ media_type.Receive(), caps.get());
if (hr != S_OK) {
SetErrorState("Failed to get capture device capabilities");
return;
- } else {
- if (media_type->formattype == FORMAT_VideoInfo) {
- VIDEOINFOHEADER* h =
- reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- if (format.frame_rate > 0)
- h->AvgTimePerFrame = kSecondsToReferenceTime / format.frame_rate;
- }
- // Set the sink filter to request this format.
- sink_filter_->SetRequestedMediaFormat(format);
- // Order the capture device to use this format.
- hr = stream_config->SetFormat(media_type.get());
- if (FAILED(hr)) {
- // TODO(grunell): Log the error. http://crbug.com/405016.
- SetErrorState("Failed to set capture device output format");
- return;
- }
}
-
- if (format.pixel_format == PIXEL_FORMAT_MJPEG && !mjpg_filter_.get()) {
- // Create MJPG filter if we need it.
- hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
-
- if (SUCCEEDED(hr)) {
- input_mjpg_pin_ = GetPin(mjpg_filter_, PINDIR_INPUT, GUID_NULL,
- GUID_NULL);
- output_mjpg_pin_ = GetPin(mjpg_filter_, PINDIR_OUTPUT, GUID_NULL,
- GUID_NULL);
- hr = graph_builder_->AddFilter(mjpg_filter_, NULL);
- }
-
- if (FAILED(hr)) {
- mjpg_filter_.Release();
- input_mjpg_pin_.Release();
- output_mjpg_pin_.Release();
- }
+ if (media_type->formattype == FORMAT_VideoInfo) {
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ if (frame_rate > 0)
+ h->AvgTimePerFrame = kSecondsToReferenceTime / frame_rate;
+ }
+ // Set the sink filter to request this format.
+ sink_filter_->SetRequestedMediaFormat(
+ found_capability.supported_format.pixel_format, frame_rate,
+ found_capability.info_header);
+ // Order the capture device to use this format.
+ hr = stream_config->SetFormat(media_type.get());
+ if (FAILED(hr)) {
+ // TODO(grunell): Log the error. http://crbug.com/405016.
+ SetErrorState("Failed to set capture device output format");
+ return;
}
SetAntiFlickerInCaptureFilter();
- if (format.pixel_format == PIXEL_FORMAT_MJPEG && mjpg_filter_.get()) {
- // Connect the camera to the MJPEG decoder.
- hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
- NULL);
- // Connect the MJPEG filter to the Capture filter.
- hr += graph_builder_->ConnectDirect(output_mjpg_pin_, input_sink_pin_,
- NULL);
- } else if (media_type->subtype == kMediaSubTypeHDYC) {
+ if (media_type->subtype == kMediaSubTypeHDYC) {
// HDYC pixel format, used by the DeckLink capture card, needs an AVI
// decompressor filter after source, let |graph_builder_| add it.
- hr = graph_builder_->Connect(output_capture_pin_, input_sink_pin_);
+ hr = graph_builder_->Connect(output_capture_pin_.get(),
+ input_sink_pin_.get());
} else {
- hr = graph_builder_->ConnectDirect(output_capture_pin_, input_sink_pin_,
- NULL);
+ hr = graph_builder_->ConnectDirect(output_capture_pin_.get(),
+ input_sink_pin_.get(), NULL);
}
if (FAILED(hr)) {
@@ -415,8 +413,9 @@ void VideoCaptureDeviceWin::AllocateAndStart(
hr = media_control_->Pause();
if (FAILED(hr)) {
- SetErrorState("Failed to Pause the Capture device. "
- "Is it already occupied?");
+ SetErrorState(
+ "Failed to Pause the Capture device. "
+ "Is it already occupied?");
return;
}
@@ -445,32 +444,17 @@ void VideoCaptureDeviceWin::StopAndDeAllocate() {
return;
}
- graph_builder_->Disconnect(output_capture_pin_);
- graph_builder_->Disconnect(input_sink_pin_);
+ graph_builder_->Disconnect(output_capture_pin_.get());
+ graph_builder_->Disconnect(input_sink_pin_.get());
- // If the _mjpg filter exist disconnect it even if it has not been used.
- if (mjpg_filter_) {
- graph_builder_->Disconnect(input_mjpg_pin_);
- graph_builder_->Disconnect(output_mjpg_pin_);
- }
- if (crossbar_filter_) {
- graph_builder_->Disconnect(analog_video_input_pin_);
- graph_builder_->Disconnect(crossbar_video_output_pin_);
- }
-
- if (FAILED(hr)) {
- SetErrorState("Failed to Stop the Capture device");
- return;
- }
client_.reset();
state_ = kIdle;
}
// Implements SinkFilterObserver::SinkFilterObserver.
-void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer,
- int length) {
- client_->OnIncomingCapturedData(
- buffer, length, capture_format_, 0, base::TimeTicks::Now());
+void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer, int length) {
+ client_->OnIncomingCapturedData(buffer, length, capture_format_, 0,
+ base::TimeTicks::Now());
}
bool VideoCaptureDeviceWin::CreateCapabilityMap() {
@@ -500,8 +484,8 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
scoped_ptr<BYTE[]> caps(new BYTE[size]);
for (int stream_index = 0; stream_index < count; ++stream_index) {
ScopedMediaType media_type;
- hr = stream_config->GetStreamCaps(
- stream_index, media_type.Receive(), caps.get());
+ hr = stream_config->GetStreamCaps(stream_index, media_type.Receive(),
+ caps.get());
// GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEED()
// macros here since they'll trigger incorrectly.
if (hr != S_OK) {
@@ -525,20 +509,21 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
// Try to get a better |time_per_frame| from IAMVideoControl. If not, use
// the value from VIDEOINFOHEADER.
REFERENCE_TIME time_per_frame = h->AvgTimePerFrame;
- if (video_control) {
+ if (video_control.get()) {
ScopedCoMem<LONGLONG> max_fps;
LONG list_size = 0;
const SIZE size = {format.frame_size.width(),
format.frame_size.height()};
- hr = video_control->GetFrameRateList(
- output_capture_pin_, stream_index, size, &list_size, &max_fps);
+ hr = video_control->GetFrameRateList(output_capture_pin_.get(),
+ stream_index, size, &list_size,
+ &max_fps);
// Can't assume the first value will return the max fps.
// Sometimes |list_size| will be > 0, but max_fps will be NULL. Some
// drivers may return an HRESULT of S_FALSE which SUCCEEDED() translates
// into success, so explicitly check S_OK. See http://crbug.com/306237.
if (hr == S_OK && list_size > 0 && max_fps) {
- time_per_frame = *std::min_element(max_fps.get(),
- max_fps.get() + list_size);
+ time_per_frame =
+ *std::min_element(max_fps.get(), max_fps.get() + list_size);
}
}
@@ -547,7 +532,7 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
? (kSecondsToReferenceTime / static_cast<float>(time_per_frame))
: 0.0;
- capabilities_.emplace_back(stream_index, format);
+ capabilities_.emplace_back(stream_index, format, h->bmiHeader);
}
}
@@ -564,9 +549,11 @@ void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
ScopedComPtr<IKsPropertySet> ks_propset;
DWORD type_support = 0;
HRESULT hr;
- if (SUCCEEDED(hr = ks_propset.QueryFrom(capture_filter_)) &&
- SUCCEEDED(hr = ks_propset->QuerySupported(PROPSETID_VIDCAP_VIDEOPROCAMP,
- KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY, &type_support)) &&
+ if (SUCCEEDED(hr = ks_propset.QueryFrom(capture_filter_.get())) &&
+ SUCCEEDED(hr = ks_propset->QuerySupported(
+ PROPSETID_VIDCAP_VIDEOPROCAMP,
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY,
+ &type_support)) &&
(type_support & KSPROPERTY_SUPPORT_SET)) {
KSPROPERTY_VIDEOPROCAMP_S data = {};
data.Property.Set = PROPSETID_VIDCAP_VIDEOPROCAMP;
@@ -575,8 +562,8 @@ void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
data.Value = (power_line_frequency == kPowerLine50Hz) ? 1 : 2;
data.Flags = KSPROPERTY_VIDEOPROCAMP_FLAGS_MANUAL;
hr = ks_propset->Set(PROPSETID_VIDCAP_VIDEOPROCAMP,
- KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY,
- &data, sizeof(data), &data, sizeof(data));
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY, &data,
+ sizeof(data), &data, sizeof(data));
DLOG_IF(ERROR, FAILED(hr)) << "Anti-flicker setting failed: "
<< logging::SystemErrorCodeToString(hr);
DVLOG_IF(2, SUCCEEDED(hr)) << "Anti-flicker set correctly.";
@@ -585,62 +572,6 @@ void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
}
}
-// Instantiate a WDM Crossbar Filter and the associated WDM Capture Filter,
-// extract the correct pins from each. The necessary pins are device specific
-// and usually the first Crossbar output pin, with a name similar to "Video
-// Decoder Out" and the first Capture input pin, with a name like "Analog Video
-// In". These pins have no special Category.
-HRESULT VideoCaptureDeviceWin::InstantiateWDMFiltersAndPins() {
- HRESULT hr = VideoCaptureDeviceWin::GetDeviceFilter(
- device_name_.id(),
- AM_KSCATEGORY_CROSSBAR,
- crossbar_filter_.Receive());
- DPLOG_IF(ERROR, FAILED(hr)) << "Failed to bind WDM Crossbar filter";
- if (FAILED(hr) || !crossbar_filter_)
- return E_FAIL;
-
- // Find Crossbar Video Output Pin: This is usually the first output pin.
- crossbar_video_output_pin_ = GetPin(crossbar_filter_, PINDIR_OUTPUT,
- GUID_NULL, MEDIATYPE_AnalogVideo);
- DLOG_IF(ERROR, !crossbar_video_output_pin_)
- << "Failed to find Crossbar Video Output pin";
- if (!crossbar_video_output_pin_)
- return E_FAIL;
-
- // Use the WDM capture filter associated to the WDM Crossbar filter.
- hr = VideoCaptureDeviceWin::GetDeviceFilter(device_name_.capabilities_id(),
- AM_KSCATEGORY_CAPTURE,
- capture_filter_.Receive());
- DPLOG_IF(ERROR, FAILED(hr)) << "Failed to bind WDM Capture filter";
- if (FAILED(hr) || !capture_filter_)
- return E_FAIL;
-
- // Find the WDM Capture Filter's Analog Video input Pin: usually the first
- // input pin.
- analog_video_input_pin_ = GetPin(capture_filter_, PINDIR_INPUT, GUID_NULL,
- MEDIATYPE_AnalogVideo);
- DLOG_IF(ERROR, !analog_video_input_pin_) << "Failed to find WDM Video Input";
- if (!analog_video_input_pin_)
- return E_FAIL;
- return S_OK;
-}
-
-// Add the WDM Crossbar filter to the Graph and connect the pins previously
-// found.
-HRESULT VideoCaptureDeviceWin::AddWDMCrossbarFilterToGraphAndConnect() {
- HRESULT hr = graph_builder_->AddFilter(crossbar_filter_, NULL);
- DPLOG_IF(ERROR, FAILED(hr)) << "Failed to add Crossbar filter to the graph";
- if (FAILED(hr))
- return E_FAIL;
-
- hr = graph_builder_->ConnectDirect(
- crossbar_video_output_pin_, analog_video_input_pin_, NULL);
- DPLOG_IF(ERROR, FAILED(hr)) << "Failed to plug WDM filters to each other";
- if (FAILED(hr))
- return E_FAIL;
- return S_OK;
-}
-
void VideoCaptureDeviceWin::SetErrorState(const std::string& reason) {
DCHECK(CalledOnValidThread());
state_ = kError;
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/video/capture/win/video_capture_device_win.h
index a52aba6aa6b..9315754f687 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_win.h
@@ -19,8 +19,8 @@
#include "base/threading/non_thread_safe.h"
#include "base/threading/thread.h"
#include "base/win/scoped_comptr.h"
+#include "media/base/video_capture_types.h"
#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_types.h"
#include "media/video/capture/win/capability_list_win.h"
#include "media/video/capture/win/sink_filter_win.h"
#include "media/video/capture/win/sink_input_pin_win.h"
@@ -64,15 +64,14 @@ class VideoCaptureDeviceWin
const GUID& sub_type);
explicit VideoCaptureDeviceWin(const Name& device_name);
- virtual ~VideoCaptureDeviceWin();
+ ~VideoCaptureDeviceWin() override;
// Opens the device driver for this device.
bool Init();
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(
- const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) override;
- virtual void StopAndDeAllocate() override;
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
private:
enum InternalState {
@@ -83,12 +82,10 @@ class VideoCaptureDeviceWin
};
// Implements SinkFilterObserver.
- virtual void FrameReceived(const uint8* buffer, int length);
+ void FrameReceived(const uint8* buffer, int length) override;
bool CreateCapabilityMap();
void SetAntiFlickerInCaptureFilter();
- HRESULT InstantiateWDMFiltersAndPins();
- HRESULT AddWDMCrossbarFilterToGraphAndConnect();
void SetErrorState(const std::string& reason);
Name device_name_;
@@ -96,19 +93,13 @@ class VideoCaptureDeviceWin
scoped_ptr<VideoCaptureDevice::Client> client_;
base::win::ScopedComPtr<IBaseFilter> capture_filter_;
+
base::win::ScopedComPtr<IGraphBuilder> graph_builder_;
+ base::win::ScopedComPtr<ICaptureGraphBuilder2> capture_graph_builder_;
+
base::win::ScopedComPtr<IMediaControl> media_control_;
base::win::ScopedComPtr<IPin> input_sink_pin_;
base::win::ScopedComPtr<IPin> output_capture_pin_;
- // Used when using a MJPEG decoder.
- base::win::ScopedComPtr<IBaseFilter> mjpg_filter_;
- base::win::ScopedComPtr<IPin> input_mjpg_pin_;
- base::win::ScopedComPtr<IPin> output_mjpg_pin_;
- // Used for WDM devices as specified by |device_name_|. These devices need a
- // WDM Crossbar Filter upstream from the Capture filter.
- base::win::ScopedComPtr<IBaseFilter> crossbar_filter_;
- base::win::ScopedComPtr<IPin> crossbar_video_output_pin_;
- base::win::ScopedComPtr<IPin> analog_video_input_pin_;
scoped_refptr<SinkFilter> sink_filter_;
diff --git a/chromium/media/video/fake_video_encode_accelerator.cc b/chromium/media/video/fake_video_encode_accelerator.cc
new file mode 100644
index 00000000000..8e23df5f60c
--- /dev/null
+++ b/chromium/media/video/fake_video_encode_accelerator.cc
@@ -0,0 +1,140 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/fake_video_encode_accelerator.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+
+namespace media {
+
+static const unsigned int kMinimumInputCount = 1;
+static const size_t kMinimumOutputBufferSize = 123456;
+
+FakeVideoEncodeAccelerator::FakeVideoEncodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner),
+ will_initialization_succeed_(true),
+ client_(NULL),
+ next_frame_is_first_frame_(true),
+ weak_this_factory_(this) {}
+
+FakeVideoEncodeAccelerator::~FakeVideoEncodeAccelerator() {
+ weak_this_factory_.InvalidateWeakPtrs();
+}
+
+VideoEncodeAccelerator::SupportedProfiles
+FakeVideoEncodeAccelerator::GetSupportedProfiles() {
+ SupportedProfiles profiles;
+ SupportedProfile profile;
+ profile.max_resolution.SetSize(1920, 1088);
+ profile.max_framerate_numerator = 30;
+ profile.max_framerate_denominator = 1;
+
+ profile.profile = media::H264PROFILE_MAIN;
+ profiles.push_back(profile);
+ profile.profile = media::VP8PROFILE_ANY;
+ profiles.push_back(profile);
+ return profiles;
+}
+
+bool FakeVideoEncodeAccelerator::Initialize(
+ VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32 initial_bitrate,
+ Client* client) {
+ if (!will_initialization_succeed_) {
+ return false;
+ }
+ if (output_profile == VIDEO_CODEC_PROFILE_UNKNOWN ||
+ output_profile > VIDEO_CODEC_PROFILE_MAX) {
+ return false;
+ }
+ client_ = client;
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoEncodeAccelerator::DoRequireBitstreamBuffers,
+ weak_this_factory_.GetWeakPtr(),
+ kMinimumInputCount,
+ input_visible_size,
+ kMinimumOutputBufferSize));
+ return true;
+}
+
+void FakeVideoEncodeAccelerator::Encode(
+ const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) {
+ DCHECK(client_);
+ queued_frames_.push(force_keyframe);
+ EncodeTask();
+}
+
+void FakeVideoEncodeAccelerator::UseOutputBitstreamBuffer(
+ const BitstreamBuffer& buffer) {
+ available_buffers_.push_back(buffer);
+ EncodeTask();
+}
+
+void FakeVideoEncodeAccelerator::RequestEncodingParametersChange(
+ uint32 bitrate,
+ uint32 framerate) {
+ stored_bitrates_.push_back(bitrate);
+}
+
+void FakeVideoEncodeAccelerator::Destroy() { delete this; }
+
+void FakeVideoEncodeAccelerator::SendDummyFrameForTesting(bool key_frame) {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoEncodeAccelerator::DoBitstreamBufferReady,
+ weak_this_factory_.GetWeakPtr(),
+ 0,
+ 23,
+ key_frame));
+}
+
+void FakeVideoEncodeAccelerator::SetWillInitializationSucceed(
+ bool will_initialization_succeed) {
+ will_initialization_succeed_ = will_initialization_succeed;
+}
+
+void FakeVideoEncodeAccelerator::DoRequireBitstreamBuffers(
+ unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) const {
+ client_->RequireBitstreamBuffers(
+ input_count, input_coded_size, output_buffer_size);
+}
+
+void FakeVideoEncodeAccelerator::EncodeTask() {
+ while (!queued_frames_.empty() && !available_buffers_.empty()) {
+ bool force_key_frame = queued_frames_.front();
+ queued_frames_.pop();
+ int32 bitstream_buffer_id = available_buffers_.front().id();
+ available_buffers_.pop_front();
+ bool key_frame = next_frame_is_first_frame_ || force_key_frame;
+ next_frame_is_first_frame_ = false;
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoEncodeAccelerator::DoBitstreamBufferReady,
+ weak_this_factory_.GetWeakPtr(),
+ bitstream_buffer_id,
+ kMinimumOutputBufferSize,
+ key_frame));
+ }
+}
+
+void FakeVideoEncodeAccelerator::DoBitstreamBufferReady(
+ int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) const {
+ client_->BitstreamBufferReady(bitstream_buffer_id,
+ payload_size,
+ key_frame);
+}
+
+} // namespace media
diff --git a/chromium/media/video/fake_video_encode_accelerator.h b/chromium/media/video/fake_video_encode_accelerator.h
new file mode 100644
index 00000000000..c421e96ed9d
--- /dev/null
+++ b/chromium/media/video/fake_video_encode_accelerator.h
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_FAKE_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_VIDEO_FAKE_VIDEO_ENCODE_ACCELERATOR_H_
+
+#include <list>
+#include <queue>
+#include <vector>
+
+#include "base/memory/weak_ptr.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/media_export.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+} // namespace base
+
+namespace media {
+
+class MEDIA_EXPORT FakeVideoEncodeAccelerator : public VideoEncodeAccelerator {
+ public:
+ explicit FakeVideoEncodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ ~FakeVideoEncodeAccelerator() override;
+
+ VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+ bool Initialize(VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32 initial_bitrate,
+ Client* client) override;
+ void Encode(const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void RequestEncodingParametersChange(uint32 bitrate,
+ uint32 framerate) override;
+ void Destroy() override;
+
+ const std::vector<uint32>& stored_bitrates() const {
+ return stored_bitrates_;
+ }
+ void SendDummyFrameForTesting(bool key_frame);
+ void SetWillInitializationSucceed(bool will_initialization_succeed);
+
+ private:
+ void DoRequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) const;
+ void EncodeTask();
+ void DoBitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) const;
+
+ // Our original (constructor) calling message loop used for all tasks.
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ std::vector<uint32> stored_bitrates_;
+ bool will_initialization_succeed_;
+
+ VideoEncodeAccelerator::Client* client_;
+
+ // Keeps track of if the current frame is the first encoded frame. This
+ // is used to force a fake key frame for the first encoded frame.
+ bool next_frame_is_first_frame_;
+
+ // A queue containing the necessary data for incoming frames. The boolean
+  // represents whether the queued frame should force a key frame.
+ std::queue<bool> queued_frames_;
+
+ // A list of buffers available for putting fake encoded frames in.
+ std::list<BitstreamBuffer> available_buffers_;
+
+ base::WeakPtrFactory<FakeVideoEncodeAccelerator> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeVideoEncodeAccelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_FAKE_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
new file mode 100644
index 00000000000..f9570933036
--- /dev/null
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
@@ -0,0 +1,352 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/gpu_memory_buffer_video_frame_pool.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+#include <list>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/containers/stack_container.h"
+#include "base/location.h"
+#include "base/memory/linked_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
+
+namespace media {
+
+// Implementation of a pool of GpuMemoryBuffers used to back VideoFrames.
+class GpuMemoryBufferVideoFramePool::PoolImpl
+ : public base::RefCountedThreadSafe<
+ GpuMemoryBufferVideoFramePool::PoolImpl> {
+ public:
+ // |task_runner| is associated to the thread where the context of
+ // GLES2Interface returned by |gpu_factories| lives.
+ // |gpu_factories| is an interface to GPU related operation and can be
+ // null.
+ PoolImpl(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories)
+ : task_runner_(task_runner), gpu_factories_(gpu_factories) {}
+
+ // Takes a software VideoFrame and returns a VideoFrame backed by native
+ // textures if possible.
+ // The data contained in video_frame is copied into the returned frame.
+ scoped_refptr<VideoFrame> CreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame);
+
+ private:
+ friend class base::RefCountedThreadSafe<
+ GpuMemoryBufferVideoFramePool::PoolImpl>;
+ ~PoolImpl();
+
+ // Resource to represent a plane.
+ struct PlaneResource {
+ gfx::Size size;
+ scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;
+ unsigned texture_id = 0u;
+ unsigned image_id = 0u;
+ gpu::Mailbox mailbox;
+ };
+
+ // All the resources needed to compose a frame.
+ struct FrameResources {
+ FrameResources(VideoFrame::Format format, const gfx::Size& size)
+ : format(format), size(size) {}
+ bool in_use = true;
+ VideoFrame::Format format;
+ gfx::Size size;
+ PlaneResource plane_resources[VideoFrame::kMaxPlanes];
+ };
+
+ // Return true if |resources| can be used to represent a frame for
+ // specific |format| and |size|.
+ static bool AreFrameResourcesCompatible(const FrameResources* resources,
+ const gfx::Size& size,
+ VideoFrame::Format format) {
+ return size == resources->size && format == resources->format;
+ }
+
+ // Get the resources needed for a frame out of the pool, or create them if
+ // necessary.
+  // This also drops the LRU resources that can't be reused for this frame.
+ FrameResources* GetOrCreateFrameResources(const gfx::Size& size,
+ VideoFrame::Format format);
+
+ // Callback called when a VideoFrame generated with GetFrameResources is no
+ // longer referenced.
+ // This could be called by any thread.
+ void MailboxHoldersReleased(FrameResources* frame_resources,
+ uint32 sync_point);
+
+ // Return frame resources to the pool. This has to be called on the thread
+ // where |task_runner| is current.
+ void ReturnFrameResources(FrameResources* frame_resources);
+
+ // Delete resources. This has to be called on the thread where |task_runner|
+ // is current.
+ static void DeleteFrameResources(
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
+ FrameResources* frame_resources);
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<GpuVideoAcceleratorFactories> gpu_factories_;
+
+ // Pool of resources.
+ std::list<FrameResources*> resources_pool_;
+
+ unsigned texture_target_ = GL_TEXTURE_2D;
+ DISALLOW_COPY_AND_ASSIGN(PoolImpl);
+};
+
+namespace {
+
+// Copy a buffer into a GpuMemoryBuffer.
+// |bytes_per_row| is expected to be less than or equal to the strides of the two
+// buffers.
+void CopyPlaneToGpuMemoryBuffer(int rows,
+ int bytes_per_row,
+ const uint8* source,
+ int source_stride,
+ gfx::GpuMemoryBuffer* buffer) {
+ TRACE_EVENT2("media", "CopyPlaneToGpuMemoryBuffer", "bytes_per_row",
+ bytes_per_row, "rows", rows);
+
+ DCHECK(buffer);
+ DCHECK(source);
+ void* data = nullptr;
+ CHECK(buffer->Map(&data));
+ uint8* mapped_buffer = static_cast<uint8*>(data);
+ int dest_stride = 0;
+ buffer->GetStride(&dest_stride);
+ DCHECK_NE(dest_stride, 0);
+ DCHECK_LE(bytes_per_row, std::abs(dest_stride));
+ DCHECK_LE(bytes_per_row, source_stride);
+ for (int row = 0; row < rows; ++row) {
+ memcpy(mapped_buffer + dest_stride * row, source + source_stride * row,
+ bytes_per_row);
+ }
+ buffer->Unmap();
+}
+
+} // unnamed namespace
+
+// Creates a VideoFrame backed by native textures starting from a software
+// VideoFrame.
+// The data contained in video_frame is copied into the returned VideoFrame.
+scoped_refptr<VideoFrame>
+GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame) {
+ if (!gpu_factories_)
+ return video_frame;
+
+ if (!gpu_factories_->IsTextureRGSupported())
+ return video_frame;
+
+ gpu::gles2::GLES2Interface* gles2 = gpu_factories_->GetGLES2Interface();
+ if (!gles2)
+ return video_frame;
+
+ VideoFrame::Format format = video_frame->format();
+ size_t planes = VideoFrame::NumPlanes(format);
+ DCHECK(video_frame->visible_rect().origin().IsOrigin());
+ gfx::Size size = video_frame->visible_rect().size();
+ gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
+
+ // Acquire resources. Incompatible ones will be dropped from the pool.
+ FrameResources* frame_resources = GetOrCreateFrameResources(size, format);
+
+  // Set up the planes, copying data into them and creating the mailboxes needed
+ // to refer to the textures.
+ for (size_t i = 0; i < planes; ++i) {
+ PlaneResource& plane_resource = frame_resources->plane_resources[i];
+ CopyPlaneToGpuMemoryBuffer(VideoFrame::Rows(i, format, size.height()),
+ VideoFrame::RowBytes(i, format, size.width()),
+ video_frame->data(i), video_frame->stride(i),
+ plane_resource.gpu_memory_buffer.get());
+
+ // Bind the texture and create or rebind the image.
+ gles2->BindTexture(texture_target_, plane_resource.texture_id);
+
+ if (plane_resource.gpu_memory_buffer && !plane_resource.image_id) {
+ const size_t width = VideoFrame::Columns(i, format, size.width());
+ const size_t height = VideoFrame::Rows(i, format, size.height());
+ plane_resource.image_id = gles2->CreateImageCHROMIUM(
+ plane_resource.gpu_memory_buffer->AsClientBuffer(), width, height,
+ GL_R8_EXT);
+ } else {
+ gles2->ReleaseTexImage2DCHROMIUM(texture_target_,
+ plane_resource.image_id);
+ }
+ gles2->BindTexImage2DCHROMIUM(texture_target_, plane_resource.image_id);
+ mailbox_holders[i] =
+ gpu::MailboxHolder(plane_resource.mailbox, texture_target_, 0);
+ }
+
+ // Insert a sync_point, this is needed to make sure that the textures the
+ // mailboxes refer to will be used only after all the previous commands posted
+ // in the command buffer have been processed.
+ unsigned sync_point = gles2->InsertSyncPointCHROMIUM();
+ for (size_t i = 0; i < planes; ++i) {
+ mailbox_holders[i].sync_point = sync_point;
+ }
+
+ // Create the VideoFrame backed by native textures.
+ return VideoFrame::WrapYUV420NativeTextures(
+ mailbox_holders[VideoFrame::kYPlane],
+ mailbox_holders[VideoFrame::kUPlane],
+ mailbox_holders[VideoFrame::kVPlane],
+ base::Bind(&PoolImpl::MailboxHoldersReleased, this, frame_resources),
+ size, video_frame->visible_rect(), video_frame->natural_size(),
+ video_frame->timestamp(), video_frame->allow_overlay());
+}
+
+// Destroy all the resources posting one task per FrameResources
+// to the |task_runner_|.
+GpuMemoryBufferVideoFramePool::PoolImpl::~PoolImpl() {
+ // Delete all the resources on the media thread.
+ while (!resources_pool_.empty()) {
+ FrameResources* frame_resources = resources_pool_.front();
+ resources_pool_.pop_front();
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(&PoolImpl::DeleteFrameResources, gpu_factories_,
+ base::Owned(frame_resources)));
+ }
+}
+
+// Tries to find the resources in the pool or create them.
+// Incompatible resources will be dropped.
+GpuMemoryBufferVideoFramePool::PoolImpl::FrameResources*
+GpuMemoryBufferVideoFramePool::PoolImpl::GetOrCreateFrameResources(
+ const gfx::Size& size,
+ VideoFrame::Format format) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ auto it = resources_pool_.begin();
+ while (it != resources_pool_.end()) {
+ FrameResources* frame_resources = *it;
+ if (!frame_resources->in_use) {
+ if (AreFrameResourcesCompatible(frame_resources, size, format)) {
+ frame_resources->in_use = true;
+ return frame_resources;
+ } else {
+ resources_pool_.erase(it++);
+ DeleteFrameResources(gpu_factories_, frame_resources);
+ delete frame_resources;
+ }
+ } else {
+ it++;
+ }
+ }
+
+ // Create the resources.
+ gpu::gles2::GLES2Interface* gles2 = gpu_factories_->GetGLES2Interface();
+ DCHECK(gles2);
+ gles2->ActiveTexture(GL_TEXTURE0);
+ size_t planes = VideoFrame::NumPlanes(format);
+ FrameResources* frame_resources = new FrameResources(format, size);
+ resources_pool_.push_back(frame_resources);
+ for (size_t i = 0; i < planes; ++i) {
+ PlaneResource& plane_resource = frame_resources->plane_resources[i];
+ const size_t width = VideoFrame::Columns(i, format, size.width());
+ const size_t height = VideoFrame::Rows(i, format, size.height());
+ const gfx::Size plane_size(width, height);
+ plane_resource.gpu_memory_buffer = gpu_factories_->AllocateGpuMemoryBuffer(
+ plane_size, gfx::GpuMemoryBuffer::R_8, gfx::GpuMemoryBuffer::MAP);
+
+ gles2->GenTextures(1, &plane_resource.texture_id);
+ gles2->BindTexture(texture_target_, plane_resource.texture_id);
+ gles2->TexParameteri(texture_target_, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ gles2->TexParameteri(texture_target_, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ gles2->TexParameteri(texture_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gles2->TexParameteri(texture_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ gles2->GenMailboxCHROMIUM(plane_resource.mailbox.name);
+ gles2->ProduceTextureCHROMIUM(texture_target_, plane_resource.mailbox.name);
+ }
+ return frame_resources;
+}
+
+// static
+void GpuMemoryBufferVideoFramePool::PoolImpl::DeleteFrameResources(
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
+ FrameResources* frame_resources) {
+ // TODO(dcastagna): As soon as the context lost is dealt with in media,
+ // make sure that we won't execute this callback (use a weak pointer to
+ // the old context).
+ gpu::gles2::GLES2Interface* gles2 = gpu_factories->GetGLES2Interface();
+ if (!gles2)
+ return;
+
+ for (PlaneResource& plane_resource : frame_resources->plane_resources) {
+ if (plane_resource.image_id)
+ gles2->DestroyImageCHROMIUM(plane_resource.image_id);
+ if (plane_resource.texture_id)
+ gles2->DeleteTextures(1, &plane_resource.texture_id);
+ }
+}
+
+// Called when a VideoFrame is no longer referenced.
+void GpuMemoryBufferVideoFramePool::PoolImpl::MailboxHoldersReleased(
+ FrameResources* frame_resources,
+ uint32 sync_point) {
+ // Return the resource on the media thread.
+ task_runner_->PostTask(FROM_HERE, base::Bind(&PoolImpl::ReturnFrameResources,
+ this, frame_resources));
+}
+
+// Put back the resources in the pool.
+void GpuMemoryBufferVideoFramePool::PoolImpl::ReturnFrameResources(
+ FrameResources* frame_resources) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ auto it = std::find(resources_pool_.begin(), resources_pool_.end(),
+ frame_resources);
+ DCHECK(it != resources_pool_.end());
+ // We want the pool to behave in a FIFO way.
+ // This minimizes the chances of locking the buffer that might be
+ // still needed for drawing.
+ std::swap(*it, resources_pool_.back());
+ frame_resources->in_use = false;
+}
+
+GpuMemoryBufferVideoFramePool::GpuMemoryBufferVideoFramePool(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories)
+ : pool_impl_(new PoolImpl(task_runner, gpu_factories)) {
+}
+
+GpuMemoryBufferVideoFramePool::~GpuMemoryBufferVideoFramePool() {
+}
+
+scoped_refptr<VideoFrame>
+GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame) {
+ switch (video_frame->format()) {
+ // Supported cases.
+ case VideoFrame::YV12:
+ case VideoFrame::I420:
+ return pool_impl_->CreateHardwareFrame(video_frame);
+ // Unsupported cases.
+ case media::VideoFrame::YV12A:
+ case media::VideoFrame::YV16:
+ case media::VideoFrame::YV12J:
+ case media::VideoFrame::YV12HD:
+ case media::VideoFrame::YV24:
+#if defined(VIDEO_HOLE)
+ case media::VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case media::VideoFrame::ARGB:
+ case media::VideoFrame::NATIVE_TEXTURE:
+ case media::VideoFrame::UNKNOWN:
+ case media::VideoFrame::NV12:
+ break;
+ }
+ return video_frame;
+}
+
+} // namespace media
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.h b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
new file mode 100644
index 00000000000..e01c791aae9
--- /dev/null
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_GPU_MEMORY_BUFFER_VIDEO_FRAME_POOL_H_
+#define MEDIA_VIDEO_GPU_MEMORY_BUFFER_VIDEO_FRAME_POOL_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/video_frame.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+class GpuVideoAcceleratorFactories;
+
+// Interface to a pool of GpuMemoryBuffers/textures/images that can be used to
+// transform software VideoFrames to VideoFrames backed by native textures.
+// The resources used by the VideoFrame created by the pool will be
+// automatically put back into the pool once the frame is destroyed.
+// The pool recycles resources to avoid unnecessarily allocating and
+// destroying textures, images and GpuMemoryBuffer that could result
+// in a round trip to the browser/GPU process.
+class MEDIA_EXPORT GpuMemoryBufferVideoFramePool {
+ public:
+ GpuMemoryBufferVideoFramePool(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories);
+ ~GpuMemoryBufferVideoFramePool();
+
+ // Returns a new VideoFrame containing only mailboxes to native resources.
+ // The content of the returned object is copied from the software-allocated
+ // |video_frame|.
+ // If it's not possible to create a new hardware VideoFrame, |video_frame|
+ // itself will be returned.
+ scoped_refptr<VideoFrame> MaybeCreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame);
+
+ private:
+ class PoolImpl;
+ scoped_refptr<PoolImpl> pool_impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferVideoFramePool);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_GPU_MEMORY_BUFFER_VIDEO_FRAME_POOL_H_
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
new file mode 100644
index 00000000000..0ca4e529c5a
--- /dev/null
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "gpu/command_buffer/client/gles2_interface_stub.h"
+#include "media/base/video_frame.h"
+#include "media/renderers/gpu_video_accelerator_factories.h"
+#include "media/renderers/mock_gpu_video_accelerator_factories.h"
+#include "media/video/gpu_memory_buffer_video_frame_pool.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+namespace {
+class TestGLES2Interface : public gpu::gles2::GLES2InterfaceStub {
+ public:
+ unsigned gen_textures = 0u;
+ void GenTextures(GLsizei n, GLuint* textures) override {
+ DCHECK_EQ(1, n);
+ *textures = ++gen_textures;
+ }
+
+ GLuint InsertSyncPointCHROMIUM() override { return ++sync_point; }
+
+ void GenMailboxCHROMIUM(GLbyte* mailbox) override {
+ *reinterpret_cast<unsigned*>(mailbox) = ++this->mailbox;
+ }
+
+ private:
+ unsigned sync_point = 0u;
+ unsigned mailbox = 0u;
+};
+
+} // unnamed namespace
+
+class GpuMemoryBufferVideoFramePoolTest : public ::testing::Test {
+ public:
+ GpuMemoryBufferVideoFramePoolTest() {}
+ void SetUp() override { gles2_.reset(new TestGLES2Interface); }
+
+ void TearDown() override { base::RunLoop().RunUntilIdle(); }
+
+ static scoped_refptr<media::VideoFrame> CreateTestYUVVideoFrame(
+ int dimension) {
+ const int kDimension = 10;
+ static uint8 y_data[kDimension * kDimension] = {0};
+ static uint8 u_data[kDimension * kDimension / 2] = {0};
+ static uint8 v_data[kDimension * kDimension / 2] = {0};
+
+ DCHECK_LE(dimension, kDimension);
+ gfx::Size size(dimension, dimension);
+
+ return media::VideoFrame::WrapExternalYuvData(
+ media::VideoFrame::YV12, // format
+ size, // coded_size
+ gfx::Rect(size), // visible_rect
+ size, // natural_size
+ size.width(), // y_stride
+ size.width() / 2, // u_stride
+ size.width() / 2, // v_stride
+ y_data, // y_data
+ u_data, // u_data
+ v_data, // v_data
+ base::TimeDelta(), // timestamp,
+ base::Closure()); // no_longer_needed_cb
+ }
+
+ protected:
+ base::MessageLoop media_message_loop_;
+ scoped_ptr<TestGLES2Interface> gles2_;
+};
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, NoGpuFactoryNoHardwareVideoFrame) {
+ scoped_refptr<VideoFrame> frame = CreateTestYUVVideoFrame(10);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_message_loop_.task_runner(), nullptr));
+
+ scoped_refptr<VideoFrame> frame2 =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(frame);
+ EXPECT_EQ(frame.get(), frame2.get());
+}
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, NoTextureRGNoHardwareVideoFrame) {
+ scoped_refptr<VideoFrame> frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_message_loop_.task_runner(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
+ .WillRepeatedly(testing::Return(false));
+ scoped_refptr<VideoFrame> frame2 =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(frame);
+ EXPECT_EQ(frame.get(), frame2.get());
+}
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, CreateOneHardwareFrame) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_message_loop_.task_runner(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+ EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
+ .WillRepeatedly(testing::Return(true));
+
+ scoped_refptr<VideoFrame> frame =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ EXPECT_NE(software_frame.get(), frame.get());
+ EXPECT_EQ(3u, gles2_->gen_textures);
+}
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, ReuseFirstResource) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_message_loop_.task_runner(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+ EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
+ .WillRepeatedly(testing::Return(true));
+
+ scoped_refptr<VideoFrame> frame =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ EXPECT_NE(software_frame.get(), frame.get());
+ gpu::Mailbox mailbox = frame->mailbox_holder(0).mailbox;
+ unsigned sync_point = frame->mailbox_holder(0).sync_point;
+ EXPECT_EQ(3u, gles2_->gen_textures);
+
+ scoped_refptr<VideoFrame> frame2 =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ EXPECT_NE(software_frame.get(), frame2.get());
+ EXPECT_NE(mailbox, frame2->mailbox_holder(0).mailbox);
+ EXPECT_EQ(6u, gles2_->gen_textures);
+
+ frame = nullptr;
+ frame2 = nullptr;
+ base::RunLoop().RunUntilIdle(); // Run posted closures.
+ frame = gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ EXPECT_NE(software_frame.get(), frame.get());
+ EXPECT_EQ(6u, gles2_->gen_textures);
+ EXPECT_EQ(frame->mailbox_holder(0).mailbox, mailbox);
+ EXPECT_NE(frame->mailbox_holder(0).sync_point, sync_point);
+}
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, DropResourceWhenSizeIsDifferent) {
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_message_loop_.task_runner(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+ EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
+ .WillRepeatedly(testing::Return(true));
+
+ scoped_refptr<VideoFrame> frame =
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ CreateTestYUVVideoFrame(10));
+ EXPECT_EQ(3u, gles2_->gen_textures);
+
+ frame = nullptr;
+ base::RunLoop().RunUntilIdle(); // Run posted closures.
+ frame = gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ CreateTestYUVVideoFrame(4));
+ EXPECT_EQ(6u, gles2_->gen_textures);
+}
+
+} // namespace media
diff --git a/chromium/media/video/h264_poc.cc b/chromium/media/video/h264_poc.cc
new file mode 100644
index 00000000000..d414b1d8403
--- /dev/null
+++ b/chromium/media/video/h264_poc.cc
@@ -0,0 +1,225 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/filters/h264_parser.h"
+#include "media/video/h264_poc.h"
+
+namespace media {
+
+H264POC::H264POC() {
+ Reset();
+}
+
+H264POC::~H264POC() {
+}
+
+void H264POC::Reset() {
+ // It shouldn't be necessary to reset these values, but doing so will improve
+ // reproducibility for buggy streams.
+ ref_pic_order_cnt_msb_ = 0;
+ ref_pic_order_cnt_lsb_ = 0;
+ prev_frame_num_ = 0;
+ prev_frame_num_offset_ = 0;
+}
+
+// Check if a slice includes memory management control operation 5, which
+// results in some |pic_order_cnt| state being cleared.
+static bool HasMMCO5(const media::H264SliceHeader& slice_hdr) {
+ // Require that the frame actually has memory management control operations.
+ if (slice_hdr.nal_ref_idc == 0 ||
+ slice_hdr.idr_pic_flag ||
+ !slice_hdr.adaptive_ref_pic_marking_mode_flag) {
+ return false;
+ }
+
+ for (size_t i = 0; i < arraysize(slice_hdr.ref_pic_marking); i++) {
+ int32_t op = slice_hdr.ref_pic_marking[i].memory_mgmnt_control_operation;
+ if (op == 5)
+ return true;
+
+ // Stop at the end of the list.
+ if (op == 0)
+ return false;
+ }
+
+ // Should not get here, the list is always zero terminated.
+ return false;
+}
+
+bool H264POC::ComputePicOrderCnt(
+ const H264SPS* sps,
+ const H264SliceHeader& slice_hdr,
+ int32_t *pic_order_cnt) {
+ if (slice_hdr.field_pic_flag) {
+ DLOG(ERROR) << "Interlaced frames are not supported";
+ return false;
+ }
+
+ // TODO(sandersd): Handle |gaps_in_frame_num_value|.
+ if (prev_frame_num_ > 0 && prev_frame_num_ < slice_hdr.frame_num - 1) {
+ DLOG(ERROR) << "Gaps in frame_num are not supported";
+ return false;
+ }
+
+ bool mmco5 = HasMMCO5(slice_hdr);
+ int32_t max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ int32_t max_pic_order_cnt_lsb =
+ 1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+ // Based on T-REC-H.264 8.2.1, "Decoding process for picture order
+ // count", available from http://www.itu.int/rec/T-REC-H.264.
+ //
+ // Reorganized slightly from spec pseudocode to handle MMCO5 when storing
+ // state instead of when loading it.
+ switch (sps->pic_order_cnt_type) {
+ case 0: {
+ int32_t prev_pic_order_cnt_msb = ref_pic_order_cnt_msb_;
+ int32_t prev_pic_order_cnt_lsb = ref_pic_order_cnt_lsb_;
+
+ // For an IDR picture, clear the state.
+ if (slice_hdr.idr_pic_flag) {
+ prev_pic_order_cnt_msb = 0;
+ prev_pic_order_cnt_lsb = 0;
+ }
+
+ // 8-3. Derive |pic_order_cnt_msb|, accounting for wrapping which is
+ // detected when |pic_order_cnt_lsb| increases or decreases by at
+ // least half of its maximum.
+ int32_t pic_order_cnt_msb;
+ if ((slice_hdr.pic_order_cnt_lsb < prev_pic_order_cnt_lsb) &&
+ (prev_pic_order_cnt_lsb - slice_hdr.pic_order_cnt_lsb >=
+ max_pic_order_cnt_lsb / 2)) {
+ pic_order_cnt_msb = prev_pic_order_cnt_msb + max_pic_order_cnt_lsb;
+ } else if ((slice_hdr.pic_order_cnt_lsb > prev_pic_order_cnt_lsb) &&
+ (slice_hdr.pic_order_cnt_lsb - prev_pic_order_cnt_lsb >
+ max_pic_order_cnt_lsb / 2)) {
+ pic_order_cnt_msb = prev_pic_order_cnt_msb - max_pic_order_cnt_lsb;
+ } else {
+ pic_order_cnt_msb = prev_pic_order_cnt_msb;
+ }
+
+ // 8-4, 8-5. Derive |top_field_order_cnt| and |bottom_field_order_cnt|
+ // (assuming no interlacing).
+ int32_t top_foc = pic_order_cnt_msb + slice_hdr.pic_order_cnt_lsb;
+ int32_t bottom_foc = top_foc + slice_hdr.delta_pic_order_cnt_bottom;
+ *pic_order_cnt = std::min(top_foc, bottom_foc);
+
+ // Store state.
+ prev_frame_num_ = slice_hdr.frame_num;
+ if (slice_hdr.nal_ref_idc != 0) {
+ if (mmco5) {
+ ref_pic_order_cnt_msb_ = 0;
+ ref_pic_order_cnt_lsb_ = top_foc;
+ } else {
+ ref_pic_order_cnt_msb_ = pic_order_cnt_msb;
+ ref_pic_order_cnt_lsb_ = slice_hdr.pic_order_cnt_lsb;
+ }
+ }
+
+ break;
+ }
+
+ case 1: {
+ // 8-6. Derive |frame_num_offset|.
+ int32_t frame_num_offset;
+ if (slice_hdr.idr_pic_flag)
+ frame_num_offset = 0;
+ else if (prev_frame_num_ > slice_hdr.frame_num)
+ frame_num_offset = prev_frame_num_offset_ + max_frame_num;
+ else
+ frame_num_offset = prev_frame_num_offset_;
+
+ // 8-7. Derive |abs_frame_num|.
+ int32_t abs_frame_num;
+ if (sps->num_ref_frames_in_pic_order_cnt_cycle != 0)
+ abs_frame_num = frame_num_offset + slice_hdr.frame_num;
+ else
+ abs_frame_num = 0;
+
+ if (slice_hdr.nal_ref_idc == 0 && abs_frame_num > 0)
+ abs_frame_num--;
+
+ // 8-9. Derive |expected_pic_order_cnt| (the |pic_order_cnt| indicated
+ // by the cycle described in the SPS).
+ int32_t expected_pic_order_cnt = 0;
+ if (abs_frame_num > 0) {
+ // 8-8. Derive pic_order_cnt_cycle_cnt and
+ // frame_num_in_pic_order_cnt_cycle.
+ // Moved inside 8-9 to avoid division when this check is not done.
+ if (sps->num_ref_frames_in_pic_order_cnt_cycle == 0) {
+ DLOG(ERROR) << "Invalid num_ref_frames_in_pic_order_cnt_cycle";
+ return false;
+ }
+
+ // H264Parser checks that num_ref_frames_in_pic_order_cnt_cycle < 255.
+ int32_t pic_order_cnt_cycle_cnt =
+ (abs_frame_num - 1) / sps->num_ref_frames_in_pic_order_cnt_cycle;
+ int32_t frame_num_in_pic_order_cnt_cycle =
+ (abs_frame_num - 1) % sps->num_ref_frames_in_pic_order_cnt_cycle;
+
+ // 8-9 continued.
+ expected_pic_order_cnt = pic_order_cnt_cycle_cnt *
+ sps->expected_delta_per_pic_order_cnt_cycle;
+ for (int32_t i = 0; i <= frame_num_in_pic_order_cnt_cycle; i++)
+ expected_pic_order_cnt += sps->offset_for_ref_frame[i];
+ }
+ if (slice_hdr.nal_ref_idc == 0)
+ expected_pic_order_cnt += sps->offset_for_non_ref_pic;
+
+ // 8-10. Derive |top_field_order_cnt| and |bottom_field_order_cnt|
+ // (assuming no interlacing).
+ int32_t top_foc = expected_pic_order_cnt + slice_hdr.delta_pic_order_cnt0;
+ int32_t bottom_foc = top_foc + sps->offset_for_top_to_bottom_field +
+ slice_hdr.delta_pic_order_cnt1;
+ *pic_order_cnt = std::min(top_foc, bottom_foc);
+
+ // Store state.
+ prev_frame_num_ = slice_hdr.frame_num;
+ prev_frame_num_offset_ = frame_num_offset;
+ if (mmco5)
+ prev_frame_num_offset_ = 0;
+
+ break;
+ }
+
+ case 2: {
+ // 8-11. Derive |frame_num_offset|.
+ int32_t frame_num_offset;
+ if (slice_hdr.idr_pic_flag)
+ frame_num_offset = 0;
+ else if (prev_frame_num_ > slice_hdr.frame_num)
+ frame_num_offset = prev_frame_num_offset_ + max_frame_num;
+ else
+ frame_num_offset = prev_frame_num_offset_;
+
+ // 8-12, 8-13. Derive |temp_pic_order_cnt| (it's always the
+ // |pic_order_cnt|, regardless of interlacing).
+ if (slice_hdr.idr_pic_flag)
+ *pic_order_cnt = 0;
+ else if (slice_hdr.nal_ref_idc == 0)
+ *pic_order_cnt = 2 * (frame_num_offset + slice_hdr.frame_num) - 1;
+ else
+ *pic_order_cnt = 2 * (frame_num_offset + slice_hdr.frame_num);
+
+ // Store state.
+ prev_frame_num_ = slice_hdr.frame_num;
+ prev_frame_num_offset_ = frame_num_offset;
+ if (mmco5)
+ prev_frame_num_offset_ = 0;
+
+ break;
+ }
+
+ default:
+ DLOG(ERROR) << "Invalid pic_order_cnt_type: " << sps->pic_order_cnt_type;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/video/h264_poc.h b/chromium/media/video/h264_poc.h
new file mode 100644
index 00000000000..5457231a323
--- /dev/null
+++ b/chromium/media/video/h264_poc.h
@@ -0,0 +1,40 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_H264_POC_H_
+#define MEDIA_VIDEO_H264_POC_H_
+
+namespace media {
+
+struct H264SPS;
+struct H264SliceHeader;
+
+class MEDIA_EXPORT H264POC {
+ public:
+ H264POC();
+ ~H264POC();
+
+ // Compute the picture order count for a slice, storing the result into
+ // |*pic_order_cnt|.
+ bool ComputePicOrderCnt(
+ const H264SPS* sps,
+ const H264SliceHeader& slice_hdr,
+ int32_t* pic_order_cnt);
+
+ // Reset computation state. It's best (although not strictly required) to call
+ // this after a seek.
+ void Reset();
+
+ private:
+ int32_t ref_pic_order_cnt_msb_;
+ int32_t ref_pic_order_cnt_lsb_;
+ int32_t prev_frame_num_;
+ int32_t prev_frame_num_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264POC);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_H264_POC_H_
diff --git a/chromium/media/video/h264_poc_unittest.cc b/chromium/media/video/h264_poc_unittest.cc
new file mode 100644
index 00000000000..67baf9378be
--- /dev/null
+++ b/chromium/media/video/h264_poc_unittest.cc
@@ -0,0 +1,245 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/h264_parser.h"
+#include "media/video/h264_poc.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class H264POCTest : public testing::Test {
+ public:
+ H264POCTest() : sps_(), slice_hdr_() {
+ // Default every frame to be a reference frame.
+ slice_hdr_.nal_ref_idc = 1;
+ }
+
+ protected:
+ bool ComputePOC() {
+ return h264_poc_.ComputePicOrderCnt(&sps_, slice_hdr_, &poc_);
+ }
+
+ // Also marks the slice as a reference frame and clears the IDR flag, which
+ // is required for memory management control operations to be parsed.
+ void SetMMCO5() {
+ slice_hdr_.nal_ref_idc = 1;
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.adaptive_ref_pic_marking_mode_flag = true;
+ slice_hdr_.ref_pic_marking[0].memory_mgmnt_control_operation = 6;
+ slice_hdr_.ref_pic_marking[1].memory_mgmnt_control_operation = 5;
+ slice_hdr_.ref_pic_marking[2].memory_mgmnt_control_operation = 0;
+ }
+
+ int32_t poc_;
+
+ H264SPS sps_;
+ H264SliceHeader slice_hdr_;
+ H264POC h264_poc_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264POCTest);
+};
+
+TEST_F(H264POCTest, PicOrderCntType0) {
+ sps_.pic_order_cnt_type = 0;
+ sps_.log2_max_pic_order_cnt_lsb_minus4 = 0; // 16
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Ref frame with POC lsb 8.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.pic_order_cnt_lsb = 8;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(8, poc_);
+
+ // Ref frame with POC lsb 0. This should be detected as wrapping, as the
+ // (negative) gap is at least half the maximum.
+ slice_hdr_.pic_order_cnt_lsb = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(16, poc_);
+
+ // Ref frame with POC lsb 9. This should be detected as negative wrapping,
+ // as the (positive) gap is more than half the maximum.
+ slice_hdr_.pic_order_cnt_lsb = 9;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(9, poc_);
+}
+
+TEST_F(H264POCTest, PicOrderCntType0_WithMMCO5) {
+ sps_.pic_order_cnt_type = 0;
+ sps_.log2_max_pic_order_cnt_lsb_minus4 = 0; // 16
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Skip ahead.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.pic_order_cnt_lsb = 8;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(8, poc_);
+
+ slice_hdr_.pic_order_cnt_lsb = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(16, poc_);
+
+ slice_hdr_.pic_order_cnt_lsb = 8;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(24, poc_);
+
+ SetMMCO5();
+ slice_hdr_.pic_order_cnt_lsb = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(32, poc_);
+
+ // Due to the MMCO5 above, this is relative to 0, but also detected as
+ // positive wrapping.
+ slice_hdr_.pic_order_cnt_lsb = 8;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(24, poc_);
+}
+
+TEST_F(H264POCTest, PicOrderCntType1) {
+ sps_.pic_order_cnt_type = 1;
+ sps_.log2_max_frame_num_minus4 = 0; // 16
+ sps_.num_ref_frames_in_pic_order_cnt_cycle = 2;
+ sps_.expected_delta_per_pic_order_cnt_cycle = 3;
+ sps_.offset_for_ref_frame[0] = 1;
+ sps_.offset_for_ref_frame[1] = 2;
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Ref frame.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(1, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 2;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(3, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 3;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(4, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 4;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(6, poc_);
+
+ // Ref frame, detected as wrapping (i.e., this is frame 16).
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(24, poc_);
+}
+
+TEST_F(H264POCTest, PicOrderCntType1_WithMMCO5) {
+ sps_.pic_order_cnt_type = 1;
+ sps_.log2_max_frame_num_minus4 = 0; // 16
+ sps_.num_ref_frames_in_pic_order_cnt_cycle = 2;
+ sps_.expected_delta_per_pic_order_cnt_cycle = 3;
+ sps_.offset_for_ref_frame[0] = 1;
+ sps_.offset_for_ref_frame[1] = 2;
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Ref frame.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(1, poc_);
+
+ // Ref frame, detected as wrapping.
+ SetMMCO5();
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(24, poc_);
+
+ // Ref frame, wrapping from before has been cleared.
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(1, poc_);
+}
+
+TEST_F(H264POCTest, PicOrderCntType2) {
+ sps_.pic_order_cnt_type = 2;
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Ref frame.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(2, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 2;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(4, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 3;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(6, poc_);
+
+ // Ref frame.
+ slice_hdr_.frame_num = 4;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(8, poc_);
+
+ // Ref frame, detected as wrapping (i.e., this is frame 16).
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(32, poc_);
+}
+
+TEST_F(H264POCTest, PicOrderCntType2_WithMMCO5) {
+ sps_.pic_order_cnt_type = 2;
+
+ // Initial IDR with POC 0.
+ slice_hdr_.idr_pic_flag = true;
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(0, poc_);
+
+ // Ref frame.
+ slice_hdr_.idr_pic_flag = false;
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(2, poc_);
+
+ // Ref frame, detected as wrapping.
+ SetMMCO5();
+ slice_hdr_.frame_num = 0;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(32, poc_);
+
+ // Ref frame, wrapping from before has been cleared.
+ slice_hdr_.frame_num = 1;
+ ASSERT_TRUE(ComputePOC());
+ ASSERT_EQ(2, poc_);
+}
+
+} // namespace media
diff --git a/chromium/media/video/jpeg_decode_accelerator.h b/chromium/media/video/jpeg_decode_accelerator.h
new file mode 100644
index 00000000000..83c59403134
--- /dev/null
+++ b/chromium/media/video/jpeg_decode_accelerator.h
@@ -0,0 +1,134 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_JPEG_DECODE_ACCELERATOR_H_
+#define MEDIA_VIDEO_JPEG_DECODE_ACCELERATOR_H_
+
+#include "base/basictypes.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+// JPEG decoder interface.
+// The inputs are JPEG images including headers (Huffman tables may be omitted).
+// The output color format is I420. The decoder will convert the color format
+// to I420 if the color space or subsampling does not match that and if it is
+// capable of doing so. The client is responsible for allocating buffers and
+// keeps the ownership of them. All methods must be called on the same thread.
+// The intended use case of this interface is decoding MJPEG images coming
+// from camera capture. It can also be used for normal still JPEG image
+// decoding, but normal JPEG images may use more JPEG features that may not be
+// supported by a particular accelerator implementation and/or platform.
+class MEDIA_EXPORT JpegDecodeAccelerator {
+ public:
+ static const int32_t kInvalidBitstreamBufferId = -1;
+
+ // Enumeration of decode errors generated by NotifyError callback.
+ enum Error {
+ // Invalid argument was passed to an API method, e.g. the output buffer is
+ // too small, JPEG width/height are too big for JDA.
+ INVALID_ARGUMENT,
+ // Encoded input is unreadable, e.g. failed to map on another process.
+ UNREADABLE_INPUT,
+ // Failed to parse compressed JPEG picture.
+ PARSE_JPEG_FAILED,
+ // Failed to decode JPEG due to unsupported JPEG features, such as profiles,
+ // coding mode, or color formats.
+ UNSUPPORTED_JPEG,
+ // A fatal failure occurred in the GPU process layer or one of its
+ // dependencies. Examples of such failures include hardware failures,
+ // driver failures, library failures, browser programming errors, and so
+ // on. Client is responsible for destroying JDA after receiving this.
+ PLATFORM_FAILURE,
+ // Largest used enum. This should be adjusted when new errors are added.
+ LARGEST_ERROR_ENUM = PLATFORM_FAILURE,
+ };
+
+ class MEDIA_EXPORT Client {
+ public:
+ // Callback called after each successful Decode().
+ // Parameters:
+ // |bitstream_buffer_id| is the id of BitstreamBuffer corresponding to
+ // Decode() call.
+ virtual void VideoFrameReady(int32_t bitstream_buffer_id) = 0;
+
+ // Callback to notify errors. Client is responsible for destroying JDA when
+ // receiving a fatal error, i.e. PLATFORM_FAILURE. For other errors, client
+ // is informed about the buffer that failed to decode and may continue
+ // using the same instance of JDA.
+ // Parameters:
+ // |error| is the error code.
+ // |bitstream_buffer_id| is the bitstream buffer id that resulted in the
+ // recoverable error. For PLATFORM_FAILURE, |bitstream_buffer_id| may be
+ // kInvalidBitstreamBufferId if the error was not related to any
+ // particular buffer being processed.
+ virtual void NotifyError(int32_t bitstream_buffer_id, Error error) = 0;
+
+ protected:
+ virtual ~Client() {}
+ };
+
+ // JPEG decoder functions.
+
+ // Initializes the JPEG decoder. Should be called once per decoder
+ // construction. This call is synchronous and returns true iff initialization
+ // is successful.
+ // Parameters:
+ // |client| is the Client interface for decode callback. The provided
+ // pointer must be valid until Destroy() is called.
+ virtual bool Initialize(Client* client) = 0;
+
+ // Decodes the given bitstream buffer that contains one JPEG picture. It
+ // supports at least baseline encoding defined in JPEG ISO/IEC 10918-1. The
+ // decoder will convert the color format to I420 or return UNSUPPORTED_JPEG
+ // if it cannot convert. Client still owns this buffer, but should deallocate
+ // or access the buffer only after receiving a decode callback VideoFrameReady
+ // with the corresponding bitstream_buffer_id, or NotifyError.
+ // Parameters:
+ // |bitstream_buffer| contains encoded JPEG picture.
+ // |video_frame| contains an allocated video frame for the output.
+ // Client is responsible for filling the coded_size of video_frame and
+ // allocating its backing buffer. For now, only shared memory backed
+ // VideoFrames are supported. After decode completes, decoded JPEG picture
+ // will be filled into the |video_frame|.
+ // Ownership of the |bitstream_buffer| and |video_frame| remains with the
+ // client. The client is not allowed to deallocate them before
+ // VideoFrameReady or NotifyError() is invoked for given id of
+ // |bitstream_buffer|, or Destroy() returns.
+ virtual void Decode(const BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<media::VideoFrame>& video_frame) = 0;
+
+ // Destroys the decoder: all pending inputs are dropped immediately. This
+ // call may asynchronously free system resources, but its client-visible
+ // effects are synchronous. After this method returns, no more callbacks
+ // will be made on the client. Deletes |this| unconditionally, so make sure
+ // to drop all pointers to it!
+ virtual void Destroy() = 0;
+
+ protected:
+ // Do not delete directly; use Destroy() or own it with a scoped_ptr, which
+ // will Destroy() it properly by default.
+ virtual ~JpegDecodeAccelerator();
+};
+
+} // namespace media
+
+namespace base {
+
+template <class T>
+struct DefaultDeleter;
+
+// Specialize DefaultDeleter so that scoped_ptr<JpegDecodeAccelerator> always
+// uses "Destroy()" instead of trying to use the destructor.
+template <>
+struct MEDIA_EXPORT DefaultDeleter<media::JpegDecodeAccelerator> {
+ public:
+ void operator()(void* jpeg_decode_accelerator) const;
+};
+
+} // namespace base
+
+#endif // MEDIA_VIDEO_JPEG_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/video/picture.cc b/chromium/media/video/picture.cc
index f0510131ea6..62057950ee3 100644
--- a/chromium/media/video/picture.cc
+++ b/chromium/media/video/picture.cc
@@ -7,9 +7,17 @@
namespace media {
PictureBuffer::PictureBuffer(int32 id, gfx::Size size, uint32 texture_id)
+ : id_(id), size_(size), texture_id_(texture_id), internal_texture_id_(0) {
+}
+
+PictureBuffer::PictureBuffer(int32 id,
+ gfx::Size size,
+ uint32 texture_id,
+ uint32 internal_texture_id)
: id_(id),
size_(size),
- texture_id_(texture_id) {
+ texture_id_(texture_id),
+ internal_texture_id_(internal_texture_id) {
}
PictureBuffer::PictureBuffer(int32 id,
@@ -19,15 +27,18 @@ PictureBuffer::PictureBuffer(int32 id,
: id_(id),
size_(size),
texture_id_(texture_id),
+ internal_texture_id_(0),
texture_mailbox_(texture_mailbox) {
}
Picture::Picture(int32 picture_buffer_id,
int32 bitstream_buffer_id,
- const gfx::Rect& visible_rect)
+ const gfx::Rect& visible_rect,
+ bool allow_overlay)
: picture_buffer_id_(picture_buffer_id),
bitstream_buffer_id_(bitstream_buffer_id),
- visible_rect_(visible_rect) {
+ visible_rect_(visible_rect),
+ allow_overlay_(allow_overlay) {
}
} // namespace media
diff --git a/chromium/media/video/picture.h b/chromium/media/video/picture.h
index 844e629ef31..1fb5096885f 100644
--- a/chromium/media/video/picture.h
+++ b/chromium/media/video/picture.h
@@ -9,7 +9,7 @@
#include "gpu/command_buffer/common/mailbox.h"
#include "media/base/media_export.h"
#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -21,6 +21,10 @@ class MEDIA_EXPORT PictureBuffer {
PictureBuffer(int32 id,
gfx::Size size,
uint32 texture_id,
+ uint32 internal_texture_id);
+ PictureBuffer(int32 id,
+ gfx::Size size,
+ uint32 texture_id,
const gpu::Mailbox& texture_mailbox);
// Returns the client-specified id of the buffer.
@@ -40,6 +44,8 @@ class MEDIA_EXPORT PictureBuffer {
return texture_id_;
}
+ uint32 internal_texture_id() const { return internal_texture_id_; }
+
const gpu::Mailbox& texture_mailbox() const {
return texture_mailbox_;
}
@@ -48,6 +54,7 @@ class MEDIA_EXPORT PictureBuffer {
int32 id_;
gfx::Size size_;
uint32 texture_id_;
+ uint32 internal_texture_id_;
gpu::Mailbox texture_mailbox_;
};
@@ -57,7 +64,8 @@ class MEDIA_EXPORT Picture {
public:
Picture(int32 picture_buffer_id,
int32 bitstream_buffer_id,
- const gfx::Rect& visible_rect);
+ const gfx::Rect& visible_rect,
+ bool allow_overlay);
// Returns the id of the picture buffer where this picture is contained.
int32 picture_buffer_id() const {
@@ -78,10 +86,13 @@ class MEDIA_EXPORT Picture {
// Picture contained in the PictureBuffer.
gfx::Rect visible_rect() const { return visible_rect_; }
+ bool allow_overlay() const { return allow_overlay_; }
+
private:
int32 picture_buffer_id_;
int32 bitstream_buffer_id_;
gfx::Rect visible_rect_;
+ bool allow_overlay_;
};
} // namespace media
diff --git a/chromium/media/video/video_decode_accelerator.cc b/chromium/media/video/video_decode_accelerator.cc
index a72912cf305..6ffbda53b50 100644
--- a/chromium/media/video/video_decode_accelerator.cc
+++ b/chromium/media/video/video_decode_accelerator.cc
@@ -4,6 +4,7 @@
#include "media/video/video_decode_accelerator.h"
+#include <GLES2/gl2.h>
#include "base/logging.h"
namespace media {
@@ -16,6 +17,17 @@ bool VideoDecodeAccelerator::CanDecodeOnIOThread() {
return false; // not reached
}
+GLenum VideoDecodeAccelerator::GetSurfaceInternalFormat() const {
+ return GL_RGBA;
+}
+
+VideoDecodeAccelerator::SupportedProfile::SupportedProfile()
+ : profile(media::VIDEO_CODEC_PROFILE_UNKNOWN) {
+}
+
+VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() {
+}
+
} // namespace media
namespace base {
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index 4df3b1c9158..5ac87e605a3 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -11,7 +11,9 @@
#include "media/base/bitstream_buffer.h"
#include "media/base/video_decoder_config.h"
#include "media/video/picture.h"
-#include "ui/gfx/size.h"
+#include "ui/gfx/geometry/size.h"
+
+typedef unsigned int GLenum;
namespace media {
@@ -20,6 +22,17 @@ namespace media {
// implement the backend of PPB_VideoDecode_Dev.
class MEDIA_EXPORT VideoDecodeAccelerator {
public:
+ // Specification of a decoding profile supported by a decoder.
+ // |max_resolution| and |min_resolution| are inclusive.
+ struct MEDIA_EXPORT SupportedProfile {
+ SupportedProfile();
+ ~SupportedProfile();
+ VideoCodecProfile profile;
+ gfx::Size max_resolution;
+ gfx::Size min_resolution;
+ };
+ using SupportedProfiles = std::vector<SupportedProfile>;
+
// Enumeration of potential errors generated by the API.
// Note: Keep these in sync with PP_VideoDecodeError_Dev. Also do not
// rearrange, reuse or remove values as they are used for gathering UMA
@@ -143,6 +156,10 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
// the first time so it can be cleared.
virtual bool CanDecodeOnIOThread();
+ // Windows creates a BGRA texture.
+ // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
+ virtual GLenum GetSurfaceInternalFormat() const;
+
protected:
// Do not delete directly; use Destroy() or own it with a scoped_ptr, which
// will Destroy() it properly by default.
diff --git a/chromium/media/video/video_encode_accelerator.cc b/chromium/media/video/video_encode_accelerator.cc
index d8a5838036f..dccd31d7bc3 100644
--- a/chromium/media/video/video_encode_accelerator.cc
+++ b/chromium/media/video/video_encode_accelerator.cc
@@ -8,6 +8,15 @@ namespace media {
VideoEncodeAccelerator::~VideoEncodeAccelerator() {}
+VideoEncodeAccelerator::SupportedProfile::SupportedProfile()
+ : profile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+ max_framerate_numerator(0),
+ max_framerate_denominator(0) {
+}
+
+VideoEncodeAccelerator::SupportedProfile::~SupportedProfile() {
+}
+
} // namespace media
namespace base {
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
index 9551c22a769..19a2216d88b 100644
--- a/chromium/media/video/video_encode_accelerator.h
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -23,12 +23,15 @@ class VideoFrame;
class MEDIA_EXPORT VideoEncodeAccelerator {
public:
// Specification of an encoding profile supported by an encoder.
- struct SupportedProfile {
+ struct MEDIA_EXPORT SupportedProfile {
+ SupportedProfile();
+ ~SupportedProfile();
VideoCodecProfile profile;
gfx::Size max_resolution;
uint32 max_framerate_numerator;
uint32 max_framerate_denominator;
};
+ using SupportedProfiles = std::vector<SupportedProfile>;
// Enumeration of potential errors generated by the API.
enum Error {
@@ -92,7 +95,7 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// Returns a list of the supported codec profiles of the video encoder. This
// can be called before Initialize().
- virtual std::vector<SupportedProfile> GetSupportedProfiles() = 0;
+ virtual SupportedProfiles GetSupportedProfiles() = 0;
// Initializes the video encoder with specific configuration. Called once per
// encoder construction. This call is synchronous and returns true iff