summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAndras Becsi <andras.becsi@digia.com>2013-12-11 21:33:03 +0100
committerAndras Becsi <andras.becsi@digia.com>2013-12-13 12:34:07 +0100
commitf2a33ff9cbc6d19943f1c7fbddd1f23d23975577 (patch)
tree0586a32aa390ade8557dfd6b4897f43a07449578 /chromium/media
parent5362912cdb5eea702b68ebe23702468d17c3017a (diff)
downloadqtwebengine-chromium-f2a33ff9cbc6d19943f1c7fbddd1f23d23975577.tar.gz
Update Chromium to branch 1650 (31.0.1650.63)
Change-Id: I57d8c832eaec1eb2364e0a8e7352a6dd354db99f Reviewed-by: Jocelyn Turcotte <jocelyn.turcotte@digia.com>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/DEPS3
-rw-r--r--chromium/media/audio/android/audio_android_unittest.cc769
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc37
-rw-r--r--chromium/media/audio/android/audio_manager_android.h22
-rw-r--r--chromium/media/audio/android/opensles_input.cc197
-rw-r--r--chromium/media/audio/android/opensles_input.h24
-rw-r--r--chromium/media/audio/android/opensles_output.cc240
-rw-r--r--chromium/media/audio/android/opensles_output.h31
-rw-r--r--chromium/media/audio/android/opensles_wrapper.cc109
-rw-r--r--chromium/media/audio/async_socket_io_handler.h113
-rw-r--r--chromium/media/audio/async_socket_io_handler_posix.cc98
-rw-r--r--chromium/media/audio/async_socket_io_handler_unittest.cc168
-rw-r--r--chromium/media/audio/async_socket_io_handler_win.cc77
-rw-r--r--chromium/media/audio/audio_input_controller.cc70
-rw-r--r--chromium/media/audio/audio_input_controller.h40
-rw-r--r--chromium/media/audio/audio_input_controller_unittest.cc50
-rw-r--r--chromium/media/audio/audio_input_device.cc9
-rw-r--r--chromium/media/audio/audio_low_latency_input_output_unittest.cc3
-rw-r--r--chromium/media/audio/audio_manager.h39
-rw-r--r--chromium/media/audio/audio_manager_base.cc77
-rw-r--r--chromium/media/audio/audio_manager_base.h37
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc (renamed from chromium/media/audio/audio_input_device_unittest.cc)176
-rw-r--r--chromium/media/audio/audio_output_controller.cc43
-rw-r--r--chromium/media/audio/audio_output_controller.h30
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc6
-rw-r--r--chromium/media/audio/audio_output_dispatcher.cc2
-rw-r--r--chromium/media/audio/audio_output_dispatcher.h2
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc6
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h1
-rw-r--r--chromium/media/audio/audio_output_proxy_unittest.cc57
-rw-r--r--chromium/media/audio/audio_output_resampler.cc8
-rw-r--r--chromium/media/audio/audio_output_resampler.h4
-rw-r--r--chromium/media/audio/audio_parameters.h1
-rw-r--r--chromium/media/audio/clockless_audio_sink.cc107
-rw-r--r--chromium/media/audio/clockless_audio_sink.h55
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.cc38
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.h15
-rw-r--r--chromium/media/audio/cras/cras_input.cc6
-rw-r--r--chromium/media/audio/cras/cras_unified.cc1
-rw-r--r--chromium/media/audio/cross_process_notification.cc30
-rw-r--r--chromium/media/audio/cross_process_notification.h172
-rw-r--r--chromium/media/audio/cross_process_notification_posix.cc114
-rw-r--r--chromium/media/audio/cross_process_notification_unittest.cc462
-rw-r--r--chromium/media/audio/cross_process_notification_win.cc270
-rw-r--r--chromium/media/audio/ios/audio_manager_ios.h56
-rw-r--r--chromium/media/audio/ios/audio_manager_ios.mm140
-rw-r--r--chromium/media/audio/ios/audio_manager_ios_unittest.cc34
-rw-r--r--chromium/media/audio/ios/audio_session_util_ios.h17
-rw-r--r--chromium/media/audio/ios/audio_session_util_ios.mm40
-rw-r--r--chromium/media/audio/linux/alsa_output_unittest.cc6
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.cc91
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.h26
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac_unittest.cc2
-rw-r--r--chromium/media/audio/mac/audio_input_mac.cc5
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.cc32
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.h3
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc171
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.h11
-rw-r--r--chromium/media/audio/mock_audio_manager.cc22
-rw-r--r--chromium/media/audio/mock_audio_manager.h9
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.cc5
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.h2
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.cc52
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.h10
-rw-r--r--chromium/media/audio/shared_memory_util.cc2
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.cc12
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.h7
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc136
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.h8
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win_unittest.cc169
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc44
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h9
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc4
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc134
-rw-r--r--chromium/media/audio/win/audio_manager_win.h15
-rw-r--r--chromium/media/audio/win/audio_output_win_unittest.cc40
-rw-r--r--chromium/media/audio/win/audio_unified_win_unittest.cc15
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc128
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h22
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc71
-rw-r--r--chromium/media/audio/win/device_enumeration_win.cc55
-rw-r--r--chromium/media/audio/win/device_enumeration_win.h16
-rw-r--r--chromium/media/audio/win/wavein_input_win.h2
-rw-r--r--chromium/media/base/android/audio_decoder_job.cc77
-rw-r--r--chromium/media/base/android/audio_decoder_job.h55
-rw-r--r--chromium/media/base/android/demuxer_android.h77
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.cc13
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.h17
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java168
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java309
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java214
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java169
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java435
-rw-r--r--chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java186
-rw-r--r--chromium/media/base/android/media_codec_bridge.cc278
-rw-r--r--chromium/media/base/android/media_codec_bridge.h110
-rw-r--r--chromium/media/base/android/media_codec_bridge_unittest.cc61
-rw-r--r--chromium/media/base/android/media_decoder_job.cc350
-rw-r--r--chromium/media/base/android/media_decoder_job.h170
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc211
-rw-r--r--chromium/media/base/android/media_drm_bridge.h101
-rw-r--r--chromium/media/base/android/media_jni_registrar.cc40
-rw-r--r--chromium/media/base/android/media_jni_registrar.h19
-rw-r--r--chromium/media/base/android/media_player_android.cc23
-rw-r--r--chromium/media/base/android/media_player_android.h40
-rw-r--r--chromium/media/base/android/media_player_bridge.cc46
-rw-r--r--chromium/media/base/android/media_player_bridge.h2
-rw-r--r--chromium/media/base/android/media_player_manager.h32
-rw-r--r--chromium/media/base/android/media_source_player.cc897
-rw-r--r--chromium/media/base/android/media_source_player.h207
-rw-r--r--chromium/media/base/android/media_source_player_unittest.cc606
-rw-r--r--chromium/media/base/android/video_decoder_job.cc68
-rw-r--r--chromium/media/base/android/video_decoder_job.h54
-rw-r--r--chromium/media/base/audio_buffer.cc7
-rw-r--r--chromium/media/base/audio_buffer.h9
-rw-r--r--chromium/media/base/audio_capturer_source.h3
-rw-r--r--chromium/media/base/audio_decoder_config.cc18
-rw-r--r--chromium/media/base/audio_decoder_config.h16
-rw-r--r--chromium/media/base/audio_hash.cc2
-rw-r--r--chromium/media/base/audio_hash.h2
-rw-r--r--chromium/media/base/audio_hash_unittest.cc2
-rw-r--r--chromium/media/base/bit_reader.cc2
-rw-r--r--chromium/media/base/channel_layout.cc69
-rw-r--r--chromium/media/base/channel_layout.h3
-rw-r--r--chromium/media/base/decoder_buffer.cc3
-rw-r--r--chromium/media/base/decoder_buffer.h11
-rw-r--r--chromium/media/base/demuxer.h14
-rw-r--r--chromium/media/base/keyboard_event_counter.cc42
-rw-r--r--chromium/media/base/keyboard_event_counter.h49
-rw-r--r--chromium/media/base/media.cc2
-rw-r--r--chromium/media/base/media_file_checker_unittest.cc2
-rw-r--r--chromium/media/base/media_keys.h7
-rw-r--r--chromium/media/base/media_stub.cc2
-rw-r--r--chromium/media/base/media_switches.cc20
-rw-r--r--chromium/media/base/media_switches.h10
-rw-r--r--chromium/media/base/pipeline.cc8
-rw-r--r--chromium/media/base/pipeline_unittest.cc28
-rw-r--r--chromium/media/base/run_all_unittests.cc17
-rw-r--r--chromium/media/base/scoped_histogram_timer_unittest.cc2
-rw-r--r--chromium/media/base/serial_runner.cc12
-rw-r--r--chromium/media/base/serial_runner.h12
-rw-r--r--chromium/media/base/serial_runner_unittest.cc176
-rw-r--r--chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc13
-rw-r--r--chromium/media/base/simd/convert_yuv_to_rgb_c.cc32
-rw-r--r--chromium/media/base/simd/yuv_to_rgb_table.cc22
-rw-r--r--chromium/media/base/sinc_resampler.cc5
-rw-r--r--chromium/media/base/stream_parser.h3
-rw-r--r--chromium/media/base/user_input_monitor.cc74
-rw-r--r--chromium/media/base/user_input_monitor.h89
-rw-r--r--chromium/media/base/user_input_monitor_linux.cc362
-rw-r--r--chromium/media/base/user_input_monitor_mac.cc57
-rw-r--r--chromium/media/base/user_input_monitor_unittest.cc78
-rw-r--r--chromium/media/base/user_input_monitor_win.cc298
-rw-r--r--chromium/media/base/vector_math.cc5
-rw-r--r--chromium/media/base/video_frame.cc37
-rw-r--r--chromium/media/base/video_frame.h5
-rw-r--r--chromium/media/cast/DEPS4
-rw-r--r--chromium/media/cast/OWNERS2
-rw-r--r--chromium/media/cast/README64
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder.cc99
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder.h54
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder_unittest.cc201
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.cc304
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.gypi28
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.h117
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver_unittest.cc95
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder.cc172
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder.h63
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder_unittest.cc70
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.cc168
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.gypi30
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.h100
-rw-r--r--chromium/media/cast/audio_sender/audio_sender_unittest.cc85
-rw-r--r--chromium/media/cast/cast.gyp90
-rw-r--r--chromium/media/cast/cast_config.cc49
-rw-r--r--chromium/media/cast/cast_config.h218
-rw-r--r--chromium/media/cast/cast_defines.h122
-rw-r--r--chromium/media/cast/cast_receiver.gyp28
-rw-r--r--chromium/media/cast/cast_receiver.h75
-rw-r--r--chromium/media/cast/cast_sender.gyp35
-rw-r--r--chromium/media/cast/cast_sender.h89
-rw-r--r--chromium/media/cast/cast_sender_impl.cc176
-rw-r--r--chromium/media/cast/cast_sender_impl.h55
-rw-r--r--chromium/media/cast/cast_thread.cc64
-rw-r--r--chromium/media/cast/cast_thread.h71
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.cc112
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.gypi24
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.h54
-rw-r--r--chromium/media/cast/congestion_control/congestion_control_unittest.cc139
-rw-r--r--chromium/media/cast/framer/cast_message_builder.cc193
-rw-r--r--chromium/media/cast/framer/cast_message_builder.h73
-rw-r--r--chromium/media/cast/framer/cast_message_builder_unittest.cc503
-rw-r--r--chromium/media/cast/framer/frame_buffer.cc103
-rw-r--r--chromium/media/cast/framer/frame_buffer.h54
-rw-r--r--chromium/media/cast/framer/frame_buffer_unittest.cc88
-rw-r--r--chromium/media/cast/framer/frame_id_map.cc252
-rw-r--r--chromium/media/cast/framer/frame_id_map.h93
-rw-r--r--chromium/media/cast/framer/framer.cc146
-rw-r--r--chromium/media/cast/framer/framer.gyp27
-rw-r--r--chromium/media/cast/framer/framer.h84
-rw-r--r--chromium/media/cast/framer/framer_unittest.cc351
-rw-r--r--chromium/media/cast/pacing/mock_paced_packet_sender.h26
-rw-r--r--chromium/media/cast/pacing/mock_packet_sender.h22
-rw-r--r--chromium/media/cast/pacing/paced_sender.cc123
-rw-r--r--chromium/media/cast/pacing/paced_sender.gyp23
-rw-r--r--chromium/media/cast/pacing/paced_sender.h90
-rw-r--r--chromium/media/cast/pacing/paced_sender_unittest.cc265
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h38
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h37
-rw-r--r--chromium/media/cast/rtcp/rtcp.cc416
-rw-r--r--chromium/media/cast/rtcp/rtcp.gyp46
-rw-r--r--chromium/media/cast/rtcp/rtcp.h173
-rw-r--r--chromium/media/cast/rtcp/rtcp_defines.h113
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver.cc465
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver.h106
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver_unittest.cc380
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender.cc544
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender.h111
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender_unittest.cc285
-rw-r--r--chromium/media/cast/rtcp/rtcp_unittest.cc410
-rw-r--r--chromium/media/cast/rtcp/rtcp_utility.cc862
-rw-r--r--chromium/media/cast/rtcp/rtcp_utility.h319
-rw-r--r--chromium/media/cast/rtcp/test_rtcp_packet_builder.cc230
-rw-r--r--chromium/media/cast/rtcp/test_rtcp_packet_builder.h94
-rw-r--r--chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h23
-rw-r--r--chromium/media/cast/rtp_common/rtp_defines.h48
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats.cc120
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats.h53
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc157
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h37
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc107
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi25
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h53
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc201
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.cc57
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.gyp27
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.h53
-rw-r--r--chromium/media/cast/rtp_sender/mock_rtp_sender.h34
-rw-r--r--chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc141
-rw-r--r--chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi23
-rw-r--r--chromium/media/cast/rtp_sender/packet_storage/packet_storage.h59
-rw-r--r--chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc114
-rw-r--r--chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc147
-rw-r--r--chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi25
-rw-r--r--chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h73
-rw-r--r--chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h47
-rw-r--r--chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc145
-rw-r--r--chromium/media/cast/rtp_sender/rtp_sender.cc147
-rw-r--r--chromium/media/cast/rtp_sender/rtp_sender.gyp27
-rw-r--r--chromium/media/cast/rtp_sender/rtp_sender.h74
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc68
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp26
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h37
-rw-r--r--chromium/media/cast/video_receiver/video_decoder.cc66
-rw-r--r--chromium/media/cast/video_receiver/video_decoder.h48
-rw-r--r--chromium/media/cast/video_receiver/video_decoder_unittest.cc94
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.cc337
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.gypi30
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.h124
-rw-r--r--chromium/media/cast/video_receiver/video_receiver_unittest.cc138
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc352
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi19
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h87
-rw-r--r--chromium/media/cast/video_sender/mock_video_encoder_controller.h31
-rw-r--r--chromium/media/cast/video_sender/video_encoder.cc112
-rw-r--r--chromium/media/cast/video_sender/video_encoder.h80
-rw-r--r--chromium/media/cast/video_sender/video_encoder_unittest.cc245
-rw-r--r--chromium/media/cast/video_sender/video_sender.cc346
-rw-r--r--chromium/media/cast/video_sender/video_sender.gypi31
-rw-r--r--chromium/media/cast/video_sender/video_sender.h145
-rw-r--r--chromium/media/cast/video_sender/video_sender_unittest.cc204
-rw-r--r--chromium/media/cdm/aes_decryptor.cc221
-rw-r--r--chromium/media/cdm/aes_decryptor.h6
-rw-r--r--chromium/media/cdm/aes_decryptor_unittest.cc945
-rw-r--r--chromium/media/cdm/ppapi/cdm_wrapper.cc64
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc4
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.h3
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common_unittest.cc18
-rw-r--r--chromium/media/ffmpeg/ffmpeg_regression_tests.cc9
-rw-r--r--chromium/media/ffmpeg/ffmpeg_unittest.cc14
-rw-r--r--chromium/media/filters/audio_file_reader_unittest.cc2
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.cc528
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.h170
-rw-r--r--chromium/media/filters/audio_renderer_algorithm_unittest.cc319
-rw-r--r--chromium/media/filters/blocking_url_protocol.cc2
-rw-r--r--chromium/media/filters/blocking_url_protocol_unittest.cc5
-rw-r--r--chromium/media/filters/chunk_demuxer.cc1
-rw-r--r--chromium/media/filters/chunk_demuxer.h4
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc11
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.cc8
-rw-r--r--chromium/media/filters/decrypting_audio_decoder_unittest.cc3
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.cc50
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.h4
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream_unittest.cc13
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc50
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.cc34
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.h12
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc14
-rw-r--r--chromium/media/filters/gpu_video_accelerator_factories.cc (renamed from chromium/media/filters/mock_gpu_video_decoder_factories.cc)6
-rw-r--r--chromium/media/filters/gpu_video_accelerator_factories.h (renamed from chromium/media/filters/gpu_video_decoder_factories.h)26
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc20
-rw-r--r--chromium/media/filters/gpu_video_decoder.h10
-rw-r--r--chromium/media/filters/in_memory_url_protocol.cc2
-rw-r--r--chromium/media/filters/mock_gpu_video_accelerator_factories.cc28
-rw-r--r--chromium/media/filters/mock_gpu_video_accelerator_factories.h (renamed from chromium/media/filters/mock_gpu_video_decoder_factories.h)33
-rw-r--r--chromium/media/filters/opus_audio_decoder.cc60
-rw-r--r--chromium/media/filters/opus_audio_decoder.h4
-rw-r--r--chromium/media/filters/pipeline_integration_test.cc131
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.cc71
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.h23
-rw-r--r--chromium/media/filters/skcanvas_video_renderer.cc2
-rw-r--r--chromium/media/filters/skcanvas_video_renderer_unittest.cc6
-rw-r--r--chromium/media/filters/source_buffer_stream.cc24
-rw-r--r--chromium/media/filters/source_buffer_stream_unittest.cc42
-rw-r--r--chromium/media/filters/stream_parser_factory.cc122
-rw-r--r--chromium/media/filters/stream_parser_factory.h2
-rw-r--r--chromium/media/filters/video_decoder_selector.cc108
-rw-r--r--chromium/media/filters/video_decoder_selector.h10
-rw-r--r--chromium/media/filters/video_decoder_selector_unittest.cc94
-rw-r--r--chromium/media/filters/video_frame_stream.cc27
-rw-r--r--chromium/media/filters/video_frame_stream_unittest.cc77
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc2
-rw-r--r--chromium/media/filters/wsola_internals.cc264
-rw-r--r--chromium/media/filters/wsola_internals.h93
-rw-r--r--chromium/media/media.gyp451
-rw-r--r--chromium/media/media_cdm.gypi6
-rw-r--r--chromium/media/media_untrusted.gyp2
-rw-r--r--chromium/media/midi/midi_manager.cc14
-rw-r--r--chromium/media/midi/midi_manager.h26
-rw-r--r--chromium/media/midi/midi_manager_mac.cc38
-rw-r--r--chromium/media/midi/midi_manager_mac.h12
-rw-r--r--chromium/media/mp2t/es_parser.h42
-rw-r--r--chromium/media/mp2t/es_parser_adts.cc295
-rw-r--r--chromium/media/mp2t/es_parser_adts.h81
-rw-r--r--chromium/media/mp2t/es_parser_h264.cc507
-rw-r--r--chromium/media/mp2t/es_parser_h264.h97
-rw-r--r--chromium/media/mp2t/mp2t_common.h21
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser.cc616
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser.h133
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser_unittest.cc189
-rw-r--r--chromium/media/mp2t/ts_packet.cc215
-rw-r--r--chromium/media/mp2t/ts_packet.h73
-rw-r--r--chromium/media/mp2t/ts_section.h40
-rw-r--r--chromium/media/mp2t/ts_section_pat.cc122
-rw-r--r--chromium/media/mp2t/ts_section_pat.h40
-rw-r--r--chromium/media/mp2t/ts_section_pes.cc312
-rw-r--r--chromium/media/mp2t/ts_section_pes.h64
-rw-r--r--chromium/media/mp2t/ts_section_pmt.cc122
-rw-r--r--chromium/media/mp2t/ts_section_pmt.h40
-rw-r--r--chromium/media/mp2t/ts_section_psi.cc132
-rw-r--r--chromium/media/mp2t/ts_section_psi.h54
-rw-r--r--chromium/media/mp3/mp3_stream_parser.cc598
-rw-r--r--chromium/media/mp3/mp3_stream_parser.h127
-rw-r--r--chromium/media/mp3/mp3_stream_parser_unittest.cc175
-rw-r--r--chromium/media/mp4/cenc.cc10
-rw-r--r--chromium/media/mp4/cenc.h4
-rw-r--r--chromium/media/mp4/mp4_stream_parser.cc9
-rw-r--r--chromium/media/mp4/mp4_stream_parser_unittest.cc7
-rw-r--r--chromium/media/mp4/track_run_iterator.cc5
-rwxr-xr-xchromium/media/tools/bug_hunter/bug_hunter.py380
-rw-r--r--chromium/media/tools/bug_hunter/bug_hunter_test.py96
-rw-r--r--chromium/media/tools/bug_hunter/bug_hunter_unittest.py182
-rw-r--r--chromium/media/tools/demuxer_bench/demuxer_bench.cc6
-rw-r--r--chromium/media/tools/media_bench/media_bench.cc588
-rw-r--r--chromium/media/tools/player_x11/player_x11.cc4
-rw-r--r--chromium/media/tools/seek_tester/seek_tester.cc112
-rw-r--r--chromium/media/tools/shader_bench/cpu_color_painter.cc96
-rw-r--r--chromium/media/tools/shader_bench/cpu_color_painter.h33
-rw-r--r--chromium/media/tools/shader_bench/gpu_color_painter.cc122
-rw-r--r--chromium/media/tools/shader_bench/gpu_color_painter.h35
-rw-r--r--chromium/media/tools/shader_bench/gpu_painter.cc92
-rw-r--r--chromium/media/tools/shader_bench/gpu_painter.h44
-rw-r--r--chromium/media/tools/shader_bench/painter.cc26
-rw-r--r--chromium/media/tools/shader_bench/painter.h39
-rw-r--r--chromium/media/tools/shader_bench/shader_bench.cc160
-rw-r--r--chromium/media/tools/shader_bench/window.cc19
-rw-r--r--chromium/media/tools/shader_bench/window.h61
-rw-r--r--chromium/media/tools/shader_bench/window_linux.cc87
-rw-r--r--chromium/media/tools/shader_bench/window_win.cc134
-rw-r--r--chromium/media/video/capture/android/imageformat_list.h22
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.cc63
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.h14
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.cc27
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.h4
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.cc68
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.h2
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.h26
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.mm145
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h5
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm82
-rw-r--r--chromium/media/video/capture/video_capture.h35
-rw-r--r--chromium/media/video/capture/video_capture_device.cc36
-rw-r--r--chromium/media/video/capture/video_capture_device.h75
-rw-r--r--chromium/media/video/capture/video_capture_device_dummy.cc30
-rw-r--r--chromium/media/video/capture/video_capture_device_dummy.h37
-rw-r--r--chromium/media/video/capture/video_capture_device_unittest.cc224
-rw-r--r--chromium/media/video/capture/video_capture_proxy.cc23
-rw-r--r--chromium/media/video/capture/video_capture_proxy.h19
-rw-r--r--chromium/media/video/capture/video_capture_types.cc60
-rw-r--r--chromium/media/video/capture/video_capture_types.h89
-rw-r--r--chromium/media/video/capture/win/filter_base_win.cc2
-rw-r--r--chromium/media/video/capture/win/pin_base_win.cc16
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.cc8
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.cc18
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.h2
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.cc38
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.h2
-rw-r--r--chromium/media/video/video_encode_accelerator.cc (renamed from chromium/media/filters/gpu_video_decoder_factories.cc)4
-rw-r--r--chromium/media/video/video_encode_accelerator.h145
-rw-r--r--chromium/media/webm/webm_audio_client.cc15
-rw-r--r--chromium/media/webm/webm_audio_client.h2
-rw-r--r--chromium/media/webm/webm_cluster_parser.cc34
-rw-r--r--chromium/media/webm/webm_cluster_parser.h8
-rw-r--r--chromium/media/webm/webm_constants.h4
-rw-r--r--chromium/media/webm/webm_parser.cc4
-rw-r--r--chromium/media/webm/webm_stream_parser.cc7
-rw-r--r--chromium/media/webm/webm_tracks_parser.cc12
-rw-r--r--chromium/media/webm/webm_tracks_parser.h2
418 files changed, 29545 insertions, 9606 deletions
diff --git a/chromium/media/DEPS b/chromium/media/DEPS
index 9c27abac17f..b46ee56b501 100644
--- a/chromium/media/DEPS
+++ b/chromium/media/DEPS
@@ -1,10 +1,13 @@
include_rules = [
"+gpu",
"+jni",
+ "+net/http",
"+third_party/ffmpeg",
"+third_party/libvpx",
"+third_party/opus",
"+third_party/skia",
+ "+ui/base",
+ "+ui/events",
"+ui/gfx",
"+ui/gl",
]
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
new file mode 100644
index 00000000000..a8e448f821f
--- /dev/null
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -0,0 +1,769 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/file_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/audio/android/audio_manager_android.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/seekable_buffer.h"
+#include "media/base/test_data_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::NotNull;
+using ::testing::Return;
+
+namespace media {
+
+ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
+ if (++*count >= limit) {
+ loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ }
+}
+
+static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
+static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
+static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
+static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";
+
+static const float kCallbackTestTimeMs = 2000.0;
+static const int kBitsPerSample = 16;
+static const int kBytesPerSample = kBitsPerSample / 8;
+
+// Converts AudioParameters::Format enumerator to readable string.
+static std::string FormatToString(AudioParameters::Format format) {
+ switch (format) {
+ case AudioParameters::AUDIO_PCM_LINEAR:
+ return std::string("AUDIO_PCM_LINEAR");
+ case AudioParameters::AUDIO_PCM_LOW_LATENCY:
+ return std::string("AUDIO_PCM_LOW_LATENCY");
+ case AudioParameters::AUDIO_FAKE:
+ return std::string("AUDIO_FAKE");
+ case AudioParameters::AUDIO_LAST_FORMAT:
+ return std::string("AUDIO_LAST_FORMAT");
+ default:
+ return std::string();
+ }
+}
+
+// Converts ChannelLayout enumerator to readable string. Does not include
+// multi-channel cases since these layouts are not supported on Android.
+static std::string LayoutToString(ChannelLayout channel_layout) {
+ switch (channel_layout) {
+ case CHANNEL_LAYOUT_NONE:
+ return std::string("CHANNEL_LAYOUT_NONE");
+ case CHANNEL_LAYOUT_MONO:
+ return std::string("CHANNEL_LAYOUT_MONO");
+ case CHANNEL_LAYOUT_STEREO:
+ return std::string("CHANNEL_LAYOUT_STEREO");
+ case CHANNEL_LAYOUT_UNSUPPORTED:
+ default:
+ return std::string("CHANNEL_LAYOUT_UNSUPPORTED");
+ }
+}
+
+static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
+ return (base::TimeDelta::FromMicroseconds(
+ params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(params.sample_rate()))).InMillisecondsF();
+}
+
+std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
+ using namespace std;
+ os << endl << "format: " << FormatToString(params.format()) << endl
+ << "channel layout: " << LayoutToString(params.channel_layout()) << endl
+ << "sample rate: " << params.sample_rate() << endl
+ << "bits per sample: " << params.bits_per_sample() << endl
+ << "frames per buffer: " << params.frames_per_buffer() << endl
+ << "channels: " << params.channels() << endl
+ << "bytes per buffer: " << params.GetBytesPerBuffer() << endl
+ << "bytes per second: " << params.GetBytesPerSecond() << endl
+ << "bytes per frame: " << params.GetBytesPerFrame() << endl
+ << "frame size in ms: " << ExpectedTimeBetweenCallbacks(params);
+ return os;
+}
+
+// Gmock implementation of AudioInputStream::AudioInputCallback.
+class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+ MOCK_METHOD5(OnData,
+ void(AudioInputStream* stream,
+ const uint8* src,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume));
+ MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD1(OnError, void(AudioInputStream* stream));
+};
+
+// Gmock implementation of AudioOutputStream::AudioSourceCallback.
+class MockAudioOutputCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData,
+ int(AudioBus* dest, AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData,
+ int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+
+ // We clear the data bus to ensure that the test does not cause noise.
+ int RealOnMoreData(AudioBus* dest, AudioBuffersState buffers_state) {
+ dest->Zero();
+ return dest->frames();
+ }
+};
+
+// Implements AudioOutputStream::AudioSourceCallback and provides audio data
+// by reading from a data file.
+class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
+ public:
+ explicit FileAudioSource(base::WaitableEvent* event, const std::string& name)
+ : event_(event), pos_(0) {
+ // Reads a test file from media/test/data directory and stores it in
+ // a DecoderBuffer.
+ file_ = ReadTestDataFile(name);
+
+ // Log the name of the file which is used as input for this test.
+ base::FilePath file_path = GetTestDataFilePath(name);
+ LOG(INFO) << "Reading from file: " << file_path.value().c_str();
+ }
+
+ virtual ~FileAudioSource() {}
+
+ // AudioOutputStream::AudioSourceCallback implementation.
+
+ // Use samples read from a data file and fill up the audio buffer
+ // provided to us in the callback.
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) OVERRIDE {
+ bool stop_playing = false;
+ int max_size =
+ audio_bus->frames() * audio_bus->channels() * kBytesPerSample;
+
+ // Adjust data size and prepare for end signal if file has ended.
+ if (pos_ + max_size > file_size()) {
+ stop_playing = true;
+ max_size = file_size() - pos_;
+ }
+
+ // File data is stored as interleaved 16-bit values. Copy data samples from
+ // the file and deinterleave to match the audio bus format.
+ // FromInterleaved() will zero out any unfilled frames when there is not
+ // sufficient data remaining in the file to fill up the complete frame.
+ int frames = max_size / (audio_bus->channels() * kBytesPerSample);
+ if (max_size) {
+ audio_bus->FromInterleaved(file_->data() + pos_, frames, kBytesPerSample);
+ pos_ += max_size;
+ }
+
+ // Set event to ensure that the test can stop when the file has ended.
+ if (stop_playing)
+ event_->Signal();
+
+ return frames;
+ }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE {
+ NOTREACHED();
+ return 0;
+ }
+
+ virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
+
+ int file_size() { return file_->data_size(); }
+
+ private:
+ base::WaitableEvent* event_;
+ int pos_;
+ scoped_refptr<DecoderBuffer> file_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileAudioSource);
+};
+
+// Implements AudioInputStream::AudioInputCallback and writes the recorded
+// audio data to a local output file. Note that this implementation should
+// only be used for manually invoked and evaluated tests, hence the created
+// file will not be destroyed after the test is done since the intention is
+// that it shall be available for off-line analysis.
+class FileAudioSink : public AudioInputStream::AudioInputCallback {
+ public:
+ explicit FileAudioSink(base::WaitableEvent* event,
+ const AudioParameters& params,
+ const std::string& file_name)
+ : event_(event), params_(params) {
+ // Allocate space for ~10 seconds of data.
+ const int kMaxBufferSize = 10 * params.GetBytesPerSecond();
+ buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize));
+
+ // Open up the binary file which will be written to in the destructor.
+ base::FilePath file_path;
+ EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
+ file_path = file_path.AppendASCII(file_name.c_str());
+ binary_file_ = file_util::OpenFile(file_path, "wb");
+ DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
+ LOG(INFO) << "Writing to file: " << file_path.value().c_str();
+ }
+
+ virtual ~FileAudioSink() {
+ int bytes_written = 0;
+ while (bytes_written < buffer_->forward_capacity()) {
+ const uint8* chunk;
+ int chunk_size;
+
+ // Stop writing if no more data is available.
+ if (!buffer_->GetCurrentChunk(&chunk, &chunk_size))
+ break;
+
+ // Write recorded data chunk to the file and prepare for next chunk.
+ // TODO(henrika): use file_util:: instead.
+ fwrite(chunk, 1, chunk_size, binary_file_);
+ buffer_->Seek(chunk_size);
+ bytes_written += chunk_size;
+ }
+ file_util::CloseFile(binary_file_);
+ }
+
+ // AudioInputStream::AudioInputCallback implementation.
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ // Store data data in a temporary buffer to avoid making blocking
+ // fwrite() calls in the audio callback. The complete buffer will be
+ // written to file in the destructor.
+ if (!buffer_->Append(src, size))
+ event_->Signal();
+ }
+
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {}
+
+ private:
+ base::WaitableEvent* event_;
+ AudioParameters params_;
+ scoped_ptr<media::SeekableBuffer> buffer_;
+ FILE* binary_file_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileAudioSink);
+};
+
+// Implements AudioInputCallback and AudioSourceCallback to support full
+// duplex audio where captured samples are played out in loopback after
+// reading from a temporary FIFO storage.
+class FullDuplexAudioSinkSource
+ : public AudioInputStream::AudioInputCallback,
+ public AudioOutputStream::AudioSourceCallback {
+ public:
+ explicit FullDuplexAudioSinkSource(const AudioParameters& params)
+ : params_(params),
+ previous_time_(base::TimeTicks::Now()),
+ started_(false) {
+ // Start with a reasonably small FIFO size. It will be increased
+ // dynamically during the test if required.
+ fifo_.reset(new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer()));
+ buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
+ }
+
+ virtual ~FullDuplexAudioSinkSource() {}
+
+ // AudioInputStream::AudioInputCallback implementation
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_time_).InMilliseconds();
+
+ base::AutoLock lock(lock_);
+ if (diff > 1000) {
+ started_ = true;
+ previous_time_ = now_time;
+
+ // Log out the extra delay added by the FIFO. This is a best effort
+ // estimate. We might be +- 10ms off here.
+ int extra_fifo_delay =
+ static_cast<int>(BytesToMilliseconds(fifo_->forward_bytes() + size));
+ DVLOG(1) << extra_fifo_delay;
+ }
+
+ // We add an initial delay of ~1 second before loopback starts to ensure
+ // a stable callback sequence and to avoid initial bursts which might add
+ // to the extra FIFO delay.
+ if (!started_)
+ return;
+
+ // Append new data to the FIFO and extend the size if the max capacity
+ // was exceeded. Flush the FIFO when extended just in case.
+ if (!fifo_->Append(src, size)) {
+ fifo_->set_forward_capacity(2 * fifo_->forward_capacity());
+ fifo_->Clear();
+ }
+ }
+
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {}
+
+ // AudioOutputStream::AudioSourceCallback implementation
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE {
+ const int size_in_bytes =
+ (params_.bits_per_sample() / 8) * dest->frames() * dest->channels();
+ EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer());
+
+ base::AutoLock lock(lock_);
+
+ // We add an initial delay of ~1 second before loopback starts to ensure
+ // a stable callback sequences and to avoid initial bursts which might add
+ // to the extra FIFO delay.
+ if (!started_) {
+ dest->Zero();
+ return dest->frames();
+ }
+
+ // Fill up destination with zeros if the FIFO does not contain enough
+ // data to fulfill the request.
+ if (fifo_->forward_bytes() < size_in_bytes) {
+ dest->Zero();
+ } else {
+ fifo_->Read(buffer_.get(), size_in_bytes);
+ dest->FromInterleaved(
+ buffer_.get(), dest->frames(), params_.bits_per_sample() / 8);
+ }
+
+ return dest->frames();
+ }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE {
+ NOTREACHED();
+ return 0;
+ }
+
+ virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
+
+ private:
+ // Converts from bytes to milliseconds given number of bytes and existing
+ // audio parameters.
+ double BytesToMilliseconds(int bytes) const {
+ const int frames = bytes / params_.GetBytesPerFrame();
+ return (base::TimeDelta::FromMicroseconds(
+ frames * base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(params_.sample_rate()))).InMillisecondsF();
+ }
+
+ AudioParameters params_;
+ base::TimeTicks previous_time_;
+ base::Lock lock_;
+ scoped_ptr<media::SeekableBuffer> fifo_;
+ scoped_ptr<uint8[]> buffer_;
+ bool started_;
+
+ DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource);
+};
+
+// Test fixture class.
+class AudioAndroidTest : public testing::Test {
+ public:
+ AudioAndroidTest() {}
+
+ protected:
+ virtual void SetUp() {
+ audio_manager_.reset(AudioManager::Create());
+ loop_.reset(new base::MessageLoopForUI());
+ }
+
+ virtual void TearDown() {}
+
+ AudioManager* audio_manager() { return audio_manager_.get(); }
+ base::MessageLoopForUI* loop() { return loop_.get(); }
+
+ AudioParameters GetDefaultInputStreamParameters() {
+ return audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ }
+
+ AudioParameters GetDefaultOutputStreamParameters() {
+ return audio_manager()->GetDefaultOutputStreamParameters();
+ }
+
+ double AverageTimeBetweenCallbacks(int num_callbacks) const {
+ return ((end_time_ - start_time_) / static_cast<double>(num_callbacks - 1))
+ .InMillisecondsF();
+ }
+
+ void StartInputStreamCallbacks(const AudioParameters& params) {
+ double expected_time_between_callbacks_ms =
+ ExpectedTimeBetweenCallbacks(params);
+ const int num_callbacks =
+ (kCallbackTestTimeMs / expected_time_between_callbacks_ms);
+ AudioInputStream* stream = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(stream);
+
+ int count = 0;
+ MockAudioInputCallback sink;
+
+ EXPECT_CALL(sink,
+ OnData(stream, NotNull(), params.GetBytesPerBuffer(), _, _))
+ .Times(AtLeast(num_callbacks))
+ .WillRepeatedly(
+ CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
+ EXPECT_CALL(sink, OnError(stream)).Times(0);
+ EXPECT_CALL(sink, OnClose(stream)).Times(1);
+
+ EXPECT_TRUE(stream->Open());
+ stream->Start(&sink);
+ start_time_ = base::TimeTicks::Now();
+ loop()->Run();
+ end_time_ = base::TimeTicks::Now();
+ stream->Stop();
+ stream->Close();
+
+ double average_time_between_callbacks_ms =
+ AverageTimeBetweenCallbacks(num_callbacks);
+ LOG(INFO) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ LOG(INFO) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
+ EXPECT_GE(average_time_between_callbacks_ms,
+ 0.70 * expected_time_between_callbacks_ms);
+ EXPECT_LE(average_time_between_callbacks_ms,
+ 1.30 * expected_time_between_callbacks_ms);
+ }
+
+ void StartOutputStreamCallbacks(const AudioParameters& params) {
+ double expected_time_between_callbacks_ms =
+ ExpectedTimeBetweenCallbacks(params);
+ const int num_callbacks =
+ (kCallbackTestTimeMs / expected_time_between_callbacks_ms);
+ AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
+ params, std::string(), std::string());
+ EXPECT_TRUE(stream);
+
+ int count = 0;
+ MockAudioOutputCallback source;
+
+ EXPECT_CALL(source, OnMoreData(NotNull(), _))
+ .Times(AtLeast(num_callbacks))
+ .WillRepeatedly(
+ DoAll(CheckCountAndPostQuitTask(&count, num_callbacks, loop()),
+ Invoke(&source, &MockAudioOutputCallback::RealOnMoreData)));
+ EXPECT_CALL(source, OnError(stream)).Times(0);
+ EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+
+ EXPECT_TRUE(stream->Open());
+ stream->Start(&source);
+ start_time_ = base::TimeTicks::Now();
+ loop()->Run();
+ end_time_ = base::TimeTicks::Now();
+ stream->Stop();
+ stream->Close();
+
+ double average_time_between_callbacks_ms =
+ AverageTimeBetweenCallbacks(num_callbacks);
+ LOG(INFO) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ LOG(INFO) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
+ EXPECT_GE(average_time_between_callbacks_ms,
+ 0.70 * expected_time_between_callbacks_ms);
+ EXPECT_LE(average_time_between_callbacks_ms,
+ 1.30 * expected_time_between_callbacks_ms);
+ }
+
+ scoped_ptr<base::MessageLoopForUI> loop_;
+ scoped_ptr<AudioManager> audio_manager_;
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest);
+};
+
+// Get the default audio input parameters and log the result.
+TEST_F(AudioAndroidTest, GetInputStreamParameters) {
+ AudioParameters params = GetDefaultInputStreamParameters();
+ EXPECT_TRUE(params.IsValid());
+ VLOG(1) << params;
+}
+
+// Get the default audio output parameters and log the result.
+TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) {
+ AudioParameters params = GetDefaultOutputStreamParameters();
+ EXPECT_TRUE(params.IsValid());
+ VLOG(1) << params;
+}
+
+// Check if low-latency output is supported and log the result as output.
+TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) {
+ AudioManagerAndroid* manager =
+ static_cast<AudioManagerAndroid*>(audio_manager());
+ bool low_latency = manager->IsAudioLowLatencySupported();
+ low_latency ? LOG(INFO) << "Low latency output is supported"
+ : LOG(INFO) << "Low latency output is *not* supported";
+}
+
+// Ensure that a default input stream can be created and closed.
+TEST_F(AudioAndroidTest, CreateAndCloseInputStream) {
+ AudioParameters params = GetDefaultInputStreamParameters();
+ AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+ ais->Close();
+}
+
+// Ensure that a default output stream can be created and closed.
+// TODO(henrika): should we also verify that this API changes the audio mode
+// to communication mode, and calls RegisterHeadsetReceiver, the first time
+// it is called?
+TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) {
+ AudioParameters params = GetDefaultOutputStreamParameters();
+ AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
+ params, std::string(), std::string());
+ EXPECT_TRUE(aos);
+ aos->Close();
+}
+
+// Ensure that a default input stream can be opened and closed.
+TEST_F(AudioAndroidTest, OpenAndCloseInputStream) {
+ AudioParameters params = GetDefaultInputStreamParameters();
+ AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+ EXPECT_TRUE(ais->Open());
+ ais->Close();
+}
+
+// Ensure that a default output stream can be opened and closed.
+TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) {
+ AudioParameters params = GetDefaultOutputStreamParameters();
+ AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
+ params, std::string(), std::string());
+ EXPECT_TRUE(aos);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+}
+
+// Start input streaming using default input parameters and ensure that the
+// callback sequence is sane.
+TEST_F(AudioAndroidTest, StartInputStreamCallbacks) {
+ AudioParameters params = GetDefaultInputStreamParameters();
+ StartInputStreamCallbacks(params);
+}
+
+// Start input streaming using non default input parameters and ensure that the
+// callback sequence is sane. The only change we make in this test is to select
+// a 10ms buffer size instead of the default size.
+// TODO(henrika): possibly add support for more variations.
+TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) {
+ AudioParameters native_params = GetDefaultInputStreamParameters();
+ AudioParameters params(native_params.format(),
+ native_params.channel_layout(),
+ native_params.sample_rate(),
+ native_params.bits_per_sample(),
+ native_params.sample_rate() / 100);
+ StartInputStreamCallbacks(params);
+}
+
+// Start output streaming using default output parameters and ensure that the
+// callback sequence is sane.
+TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) {
+ AudioParameters params = GetDefaultOutputStreamParameters();
+ StartOutputStreamCallbacks(params);
+}
+
+// Start output streaming using non default output parameters and ensure that
+// the callback sequence is sane. The only change we make in this test is to
+// select a 10ms buffer size instead of the default size and to open up the
+// device in mono.
+// TODO(henrika): possibly add support for more variations.
+TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) {
+ AudioParameters native_params = GetDefaultOutputStreamParameters();
+ AudioParameters params(native_params.format(),
+ CHANNEL_LAYOUT_MONO,
+ native_params.sample_rate(),
+ native_params.bits_per_sample(),
+ native_params.sample_rate() / 100);
+ StartOutputStreamCallbacks(params);
+}
+
+// Play out a PCM file segment in real time and allow the user to verify that
+// the rendered audio sounds OK.
+// NOTE: this test requires user interaction and is not designed to run as an
+// automatized test on bots.
+TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) {
+ AudioParameters params = GetDefaultOutputStreamParameters();
+ VLOG(1) << params;
+ AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
+ params, std::string(), std::string());
+ EXPECT_TRUE(aos);
+
+ std::string file_name;
+ if (params.sample_rate() == 48000 && params.channels() == 2) {
+ file_name = kSpeechFile_16b_s_48k;
+ } else if (params.sample_rate() == 48000 && params.channels() == 1) {
+ file_name = kSpeechFile_16b_m_48k;
+ } else if (params.sample_rate() == 44100 && params.channels() == 2) {
+ file_name = kSpeechFile_16b_s_44k;
+ } else if (params.sample_rate() == 44100 && params.channels() == 1) {
+ file_name = kSpeechFile_16b_m_44k;
+ } else {
+ FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only.";
+ return;
+ }
+
+ base::WaitableEvent event(false, false);
+ FileAudioSource source(&event, file_name);
+
+ EXPECT_TRUE(aos->Open());
+ aos->SetVolume(1.0);
+ aos->Start(&source);
+ LOG(INFO) << ">> Verify that the file is played out correctly...";
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
+ aos->Stop();
+ aos->Close();
+}
+
+// Start input streaming and run it for ten seconds while recording to a
+// local audio file.
+// NOTE: this test requires user interaction and is not designed to run as an
+// automatized test on bots.
+TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
+ AudioParameters params = GetDefaultInputStreamParameters();
+ VLOG(1) << params;
+ AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+
+ std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
+ params.sample_rate(),
+ params.frames_per_buffer(),
+ params.channels());
+
+ base::WaitableEvent event(false, false);
+ FileAudioSink sink(&event, params, file_name);
+
+ EXPECT_TRUE(ais->Open());
+ ais->Start(&sink);
+ LOG(INFO) << ">> Speak into the microphone to record audio...";
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
+ ais->Stop();
+ ais->Close();
+}
+
+// Same test as RunSimplexInputStreamWithFileAsSink but this time output
+// streaming is active as well (reads zeros only).
+// NOTE: this test requires user interaction and is not designed to run as an
+// automatized test on bots.
+TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
+ AudioParameters in_params = GetDefaultInputStreamParameters();
+ AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
+ in_params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+
+ AudioParameters out_params =
+ audio_manager()->GetDefaultOutputStreamParameters();
+ AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
+ out_params, std::string(), std::string());
+ EXPECT_TRUE(aos);
+
+ std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
+ in_params.sample_rate(),
+ in_params.frames_per_buffer(),
+ in_params.channels());
+
+ base::WaitableEvent event(false, false);
+ FileAudioSink sink(&event, in_params, file_name);
+ MockAudioOutputCallback source;
+
+ EXPECT_CALL(source, OnMoreData(NotNull(), _)).WillRepeatedly(
+ Invoke(&source, &MockAudioOutputCallback::RealOnMoreData));
+ EXPECT_CALL(source, OnError(aos)).Times(0);
+ EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+
+ EXPECT_TRUE(ais->Open());
+ EXPECT_TRUE(aos->Open());
+ ais->Start(&sink);
+ aos->Start(&source);
+ LOG(INFO) << ">> Speak into the microphone to record audio";
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
+ aos->Stop();
+ ais->Stop();
+ aos->Close();
+ ais->Close();
+}
+
+// Start audio in both directions while feeding captured data into a FIFO so
+// it can be read directly (in loopback) by the render side. A small extra
+// delay will be added by the FIFO and an estimate of this delay will be
+// printed out during the test.
+// NOTE: this test requires user interaction and is not designed to run as an
+// automatized test on bots.
+TEST_F(AudioAndroidTest,
+ DISABLED_RunSymmetricInputAndOutputStreamsInFullDuplex) {
+ // Get native audio parameters for the input side.
+ AudioParameters default_input_params = GetDefaultInputStreamParameters();
+
+ // Modify the parameters so that both input and output can use the same
+ // parameters by selecting 10ms as buffer size. This will also ensure that
+ // the output stream will be a mono stream since mono is default for input
+ // audio on Android.
+ AudioParameters io_params(default_input_params.format(),
+ default_input_params.channel_layout(),
+ default_input_params.sample_rate(),
+ default_input_params.bits_per_sample(),
+ default_input_params.sample_rate() / 100);
+ VLOG(1) << io_params;
+
+ // Create input and output streams using the common audio parameters.
+ AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
+ io_params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+ AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
+ io_params, std::string(), std::string());
+ EXPECT_TRUE(aos);
+
+ FullDuplexAudioSinkSource full_duplex(io_params);
+
+ // Start a full duplex audio session and print out estimates of the extra
+ // delay we should expect from the FIFO. If real-time delay measurements are
+ // performed, the result should be reduced by this extra delay since it is
+ // something that has been added by the test.
+ EXPECT_TRUE(ais->Open());
+ EXPECT_TRUE(aos->Open());
+ ais->Start(&full_duplex);
+ aos->Start(&full_duplex);
+ VLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
+ << "once per second during this test.";
+ LOG(INFO) << ">> Speak into the mic and listen to the audio in loopback...";
+ fflush(stdout);
+ base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
+ printf("\n");
+ aos->Stop();
+ ais->Stop();
+ aos->Close();
+ ais->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 164344aba0b..04b226fa64f 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -16,6 +16,13 @@
namespace media {
+static void AddDefaultDevice(AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ device_names->push_front(
+ AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
+ AudioManagerBase::kDefaultDeviceId));
+}
+
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 10;
@@ -51,10 +58,13 @@ bool AudioManagerAndroid::HasAudioInputDevices() {
}
void AudioManagerAndroid::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- device_names->push_front(
- media::AudioDeviceName(kDefaultDeviceName, kDefaultDeviceId));
+ AudioDeviceNames* device_names) {
+ AddDefaultDevice(device_names);
+}
+
+void AudioManagerAndroid::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ AddDefaultDevice(device_names);
}
AudioParameters AudioManagerAndroid::GetInputStreamParameters(
@@ -74,9 +84,12 @@ AudioParameters AudioManagerAndroid::GetInputStreamParameters(
}
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
AudioOutputStream* stream =
- AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ AudioManagerBase::MakeAudioOutputStream(params, std::string(),
+ std::string());
if (stream && output_stream_count() == 1) {
SetAudioMode(kAudioModeInCommunication);
RegisterHeadsetReceiver();
@@ -87,7 +100,7 @@ AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) {
AudioInputStream* stream =
- AudioManagerBase::MakeAudioInputStream(params, device_id);
+ AudioManagerBase::MakeAudioInputStream(params, device_id);
return stream;
}
@@ -104,13 +117,16 @@ void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
}
AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
- const AudioParameters& params) {
+ const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return new OpenSLESOutputStream(this, params);
}
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return new OpenSLESOutputStream(this, params);
}
@@ -140,7 +156,10 @@ int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
}
AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = GetNativeOutputSampleRate();
int buffer_size = GetOptimalOutputFrameSize(sample_rate, 2);
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index fa1c3736a35..ed2b2c3ce91 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -6,6 +6,7 @@
#define MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
#include "base/android/jni_android.h"
+#include "base/gtest_prod_util.h"
#include "media/audio/audio_manager_base.h"
namespace media {
@@ -18,16 +19,20 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ const AudioParameters& params,
+ const std::string& device_id) OVERRIDE;
virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
@@ -36,11 +41,14 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ const AudioParameters& params,
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ const AudioParameters& params,
+ const std::string& device_id) OVERRIDE;
static bool RegisterAudioManager(JNIEnv* env);
@@ -48,6 +56,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual ~AudioManagerAndroid();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
@@ -59,6 +68,9 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
int GetAudioLowLatencyOutputFrameSize();
int GetOptimalOutputFrameSize(int sample_rate, int channels);
+ // Allow the AudioAndroidTest to access private methods.
+ FRIEND_TEST_ALL_PREFIXES(AudioAndroidTest, IsAudioLowLatencySupported);
+
// Java AudioManager instance.
base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index 15c3eac3726..a0e4ce3b987 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -4,16 +4,17 @@
#include "media/audio/android/opensles_input.h"
+#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/audio/android/audio_manager_android.h"
-#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
- do { \
- SLresult err = (op); \
- if (err != SL_RESULT_SUCCESS) { \
+#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
DLOG(ERROR) << #op << " failed: " << err; \
- return __VA_ARGS__; \
- } \
+ return __VA_ARGS__; \
+ } \
} while (0)
namespace media {
@@ -24,9 +25,10 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
callback_(NULL),
recorder_(NULL),
simple_buffer_queue_(NULL),
- active_queue_(0),
+ active_buffer_index_(0),
buffer_size_bytes_(0),
started_(false) {
+ DVLOG(2) << "OpenSLESInputStream::OpenSLESInputStream()";
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
// Provides sampling rate in milliHertz to OpenSLES.
@@ -47,6 +49,8 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
}
OpenSLESInputStream::~OpenSLESInputStream() {
+ DVLOG(2) << "OpenSLESInputStream::~OpenSLESInputStream()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!recorder_object_.Get());
DCHECK(!engine_object_.Get());
DCHECK(!recorder_);
@@ -55,6 +59,8 @@ OpenSLESInputStream::~OpenSLESInputStream() {
}
bool OpenSLESInputStream::Open() {
+ DVLOG(2) << "OpenSLESInputStream::Open()";
+ DCHECK(thread_checker_.CalledOnValidThread());
if (engine_object_.Get())
return false;
@@ -67,44 +73,59 @@ bool OpenSLESInputStream::Open() {
}
void OpenSLESInputStream::Start(AudioInputCallback* callback) {
+ DVLOG(2) << "OpenSLESInputStream::Start()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(callback);
DCHECK(recorder_);
DCHECK(simple_buffer_queue_);
if (started_)
return;
- // Enable the flags before streaming.
+ base::AutoLock lock(lock_);
+ DCHECK(callback_ == NULL || callback_ == callback);
callback_ = callback;
- active_queue_ = 0;
- started_ = true;
+ active_buffer_index_ = 0;
+ // Enqueues kMaxNumOfBuffersInQueue zero buffers to get the ball rolling.
+ // TODO(henrika): add support for Start/Stop/Start sequences when we are
+ // able to clear the buffer queue. There is currently a bug in the OpenSLES
+ // implementation which forces us to always call Stop() and Close() before
+ // calling Start() again.
SLresult err = SL_RESULT_UNKNOWN_ERROR;
- // Enqueues |kNumOfQueuesInBuffer| zero buffers to get the ball rolling.
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+ for (int i = 0; i < kMaxNumOfBuffersInQueue; ++i) {
err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[i],
- buffer_size_bytes_);
+ simple_buffer_queue_, audio_data_[i], buffer_size_bytes_);
if (SL_RESULT_SUCCESS != err) {
HandleError(err);
+ started_ = false;
return;
}
}
- // Start the recording by setting the state to |SL_RECORDSTATE_RECORDING|.
+ // Start the recording by setting the state to SL_RECORDSTATE_RECORDING.
+ // When the object is in the SL_RECORDSTATE_RECORDING state, adding buffers
+ // will implicitly start the filling process.
err = (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING);
- if (SL_RESULT_SUCCESS != err)
+ if (SL_RESULT_SUCCESS != err) {
HandleError(err);
+ started_ = false;
+ return;
+ }
+
+ started_ = true;
}
void OpenSLESInputStream::Stop() {
+ DVLOG(2) << "OpenSLESInputStream::Stop()";
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!started_)
return;
- // Stop recording by setting the record state to |SL_RECORDSTATE_STOPPED|.
+ base::AutoLock lock(lock_);
+
+ // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
LOG_ON_FAILURE_AND_RETURN(
- (*recorder_)->SetRecordState(recorder_,
- SL_RECORDSTATE_STOPPED));
+ (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED));
// Clear the buffer queue to get rid of old data when resuming recording.
LOG_ON_FAILURE_AND_RETURN(
@@ -114,17 +135,32 @@ void OpenSLESInputStream::Stop() {
}
void OpenSLESInputStream::Close() {
+ DVLOG(2) << "OpenSLESInputStream::Close()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+
// Stop the stream if it is still recording.
Stop();
+ {
+ base::AutoLock lock(lock_);
+
+ // TODO(henrika): we use |callback_| in Close() but |callback_| is set
+ // in Start(). Hence, it should be cleared in Stop() and not used here.
+ if (callback_) {
+ callback_->OnClose(this);
+ callback_ = NULL;
+ }
- // Explicitly free the player objects and invalidate their associated
- // interfaces. They have to be done in the correct order.
- recorder_object_.Reset();
- engine_object_.Reset();
- simple_buffer_queue_ = NULL;
- recorder_ = NULL;
+ // Destroy the buffer queue recorder object and invalidate all associated
+ // interfaces.
+ recorder_object_.Reset();
+ simple_buffer_queue_ = NULL;
+ recorder_ = NULL;
- ReleaseAudioBuffer();
+ // Destroy the engine object. We don't store any associated interface for
+ // this object.
+ engine_object_.Reset();
+ ReleaseAudioBuffer();
+ }
audio_manager_->ReleaseInputStream(this);
}
@@ -134,9 +170,7 @@ double OpenSLESInputStream::GetMaxVolume() {
return 0.0;
}
-void OpenSLESInputStream::SetVolume(double volume) {
- NOTIMPLEMENTED();
-}
+void OpenSLESInputStream::SetVolume(double volume) { NOTIMPLEMENTED(); }
double OpenSLESInputStream::GetVolume() {
NOTIMPLEMENTED();
@@ -153,54 +187,47 @@ bool OpenSLESInputStream::GetAutomaticGainControl() {
}
bool OpenSLESInputStream::CreateRecorder() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!engine_object_.Get());
+ DCHECK(!recorder_object_.Get());
+ DCHECK(!recorder_);
+ DCHECK(!simple_buffer_queue_);
+
// Initializes the engine object with specific option. After working with the
// object, we need to free the object and its resources.
SLEngineOption option[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
- };
- LOG_ON_FAILURE_AND_RETURN(slCreateEngine(engine_object_.Receive(),
- 1,
- option,
- 0,
- NULL,
- NULL),
- false);
+ {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
+ LOG_ON_FAILURE_AND_RETURN(
+ slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL),
+ false);
// Realize the SL engine object in synchronous mode.
- LOG_ON_FAILURE_AND_RETURN(engine_object_->Realize(engine_object_.Get(),
- SL_BOOLEAN_FALSE),
- false);
+ LOG_ON_FAILURE_AND_RETURN(
+ engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE), false);
// Get the SL engine interface which is implicit.
SLEngineItf engine;
- LOG_ON_FAILURE_AND_RETURN(engine_object_->GetInterface(engine_object_.Get(),
- SL_IID_ENGINE,
- &engine),
+ LOG_ON_FAILURE_AND_RETURN(engine_object_->GetInterface(
+ engine_object_.Get(), SL_IID_ENGINE, &engine),
false);
// Audio source configuration.
SLDataLocator_IODevice mic_locator = {
- SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
- SL_DEFAULTDEVICEID_AUDIOINPUT, NULL
- };
- SLDataSource audio_source = { &mic_locator, NULL };
+ SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
+ SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
+ SLDataSource audio_source = {&mic_locator, NULL};
// Audio sink configuration.
SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // Locator type.
- static_cast<SLuint32>(kNumOfQueuesInBuffer) // Number of buffers.
- };
- SLDataSink audio_sink = { &buffer_queue, &format_ };
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kMaxNumOfBuffersInQueue)};
+ SLDataSink audio_sink = {&buffer_queue, &format_};
// Create an audio recorder.
- const SLInterfaceID interface_id[] = {
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- SL_IID_ANDROIDCONFIGURATION
- };
- const SLboolean interface_required[] = {
- SL_BOOLEAN_TRUE,
- SL_BOOLEAN_TRUE
- };
+ const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ SL_IID_ANDROIDCONFIGURATION};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+
// Create AudioRecorder and specify SL_IID_ANDROIDCONFIGURATION.
LOG_ON_FAILURE_AND_RETURN(
(*engine)->CreateAudioRecorder(engine,
@@ -219,24 +246,24 @@ bool OpenSLESInputStream::CreateRecorder() {
&recorder_config),
false);
+ // Uses the main microphone tuned for audio communications.
SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
LOG_ON_FAILURE_AND_RETURN(
(*recorder_config)->SetConfiguration(recorder_config,
SL_ANDROID_KEY_RECORDING_PRESET,
- &stream_type, sizeof(SLint32)),
+ &stream_type,
+ sizeof(SLint32)),
false);
// Realize the recorder object in synchronous mode.
LOG_ON_FAILURE_AND_RETURN(
- recorder_object_->Realize(recorder_object_.Get(),
- SL_BOOLEAN_FALSE),
+ recorder_object_->Realize(recorder_object_.Get(), SL_BOOLEAN_FALSE),
false);
// Get an implicit recorder interface.
LOG_ON_FAILURE_AND_RETURN(
- recorder_object_->GetInterface(recorder_object_.Get(),
- SL_IID_RECORD,
- &recorder_),
+ recorder_object_->GetInterface(
+ recorder_object_.Get(), SL_IID_RECORD, &recorder_),
false);
// Get the simple buffer queue interface.
@@ -249,61 +276,67 @@ bool OpenSLESInputStream::CreateRecorder() {
// Register the input callback for the simple buffer queue.
// This callback will be called when receiving new data from the device.
LOG_ON_FAILURE_AND_RETURN(
- (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
- SimpleBufferQueueCallback,
- this),
+ (*simple_buffer_queue_)->RegisterCallback(
+ simple_buffer_queue_, SimpleBufferQueueCallback, this),
false);
return true;
}
void OpenSLESInputStream::SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* instance) {
OpenSLESInputStream* stream =
reinterpret_cast<OpenSLESInputStream*>(instance);
stream->ReadBufferQueue();
}
void OpenSLESInputStream::ReadBufferQueue() {
+ base::AutoLock lock(lock_);
if (!started_)
return;
- // TODO(xians): Get an accurate delay estimation.
+  TRACE_EVENT0("audio", "OpenSLESInputStream::ReadBufferQueue");
+
+ // TODO(henrika): Investigate if it is possible to get an accurate
+ // delay estimation.
callback_->OnData(this,
- audio_data_[active_queue_],
+ audio_data_[active_buffer_index_],
buffer_size_bytes_,
buffer_size_bytes_,
0.0);
// Done with this buffer. Send it to device for recording.
- SLresult err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[active_queue_],
- buffer_size_bytes_);
+ SLresult err =
+ (*simple_buffer_queue_)->Enqueue(simple_buffer_queue_,
+ audio_data_[active_buffer_index_],
+ buffer_size_bytes_);
if (SL_RESULT_SUCCESS != err)
HandleError(err);
- active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
+ active_buffer_index_ = (active_buffer_index_ + 1) % kMaxNumOfBuffersInQueue;
}
void OpenSLESInputStream::SetupAudioBuffer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!audio_data_[0]);
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+ for (int i = 0; i < kMaxNumOfBuffersInQueue; ++i) {
audio_data_[i] = new uint8[buffer_size_bytes_];
}
}
void OpenSLESInputStream::ReleaseAudioBuffer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (audio_data_[0]) {
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- delete [] audio_data_[i];
+ for (int i = 0; i < kMaxNumOfBuffersInQueue; ++i) {
+ delete[] audio_data_[i];
audio_data_[i] = NULL;
}
}
}
void OpenSLESInputStream::HandleError(SLresult error) {
- DLOG(FATAL) << "OpenSLES Input error " << error;
+ DLOG(ERROR) << "OpenSLES Input error " << error;
if (callback_)
callback_->OnError(this);
}
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
index 9743992fc65..e05831c6712 100644
--- a/chromium/media/audio/android/opensles_input.h
+++ b/chromium/media/audio/android/opensles_input.h
@@ -9,18 +9,23 @@
#include <SLES/OpenSLES_Android.h>
#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "media/audio/android/opensles_util.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/android/opensles_util.h"
namespace media {
class AudioManagerAndroid;
// Implements PCM audio input support for Android using the OpenSLES API.
+// This class is created and lives on the Audio Manager thread but recorded
+// audio buffers are given to us from an internal OpenSLES audio thread.
+// All public methods should be called on the Audio Manager thread.
class OpenSLESInputStream : public AudioInputStream {
public:
- static const int kNumOfQueuesInBuffer = 2;
+ static const int kMaxNumOfBuffersInQueue = 2;
OpenSLESInputStream(AudioManagerAndroid* manager,
const AudioParameters& params);
@@ -41,9 +46,12 @@ class OpenSLESInputStream : public AudioInputStream {
private:
bool CreateRecorder();
+ // Called from OpenSLES specific audio worker thread.
static void SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* instance);
+ // Called from OpenSLES specific audio worker thread.
void ReadBufferQueue();
// Called in Open();
@@ -56,6 +64,12 @@ class OpenSLESInputStream : public AudioInputStream {
// the attached AudioInputCallback::OnError().
void HandleError(SLresult error);
+ base::ThreadChecker thread_checker_;
+
+ // Protects |callback_|, |active_buffer_index_|, |audio_data_|,
+ // |buffer_size_bytes_| and |simple_buffer_queue_|.
+ base::Lock lock_;
+
AudioManagerAndroid* audio_manager_;
AudioInputCallback* callback_;
@@ -73,9 +87,9 @@ class OpenSLESInputStream : public AudioInputStream {
// Audio buffers that are allocated in the constructor based on
// info from audio parameters.
- uint8* audio_data_[kNumOfQueuesInBuffer];
+ uint8* audio_data_[kMaxNumOfBuffersInQueue];
- int active_queue_;
+ int active_buffer_index_;
int buffer_size_bytes_;
bool started_;
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index c6d455715d9..5643f833c3d 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -8,13 +8,13 @@
#include "base/logging.h"
#include "media/audio/android/audio_manager_android.h"
-#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
- do { \
- SLresult err = (op); \
- if (err != SL_RESULT_SUCCESS) { \
+#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
DLOG(ERROR) << #op << " failed: " << err; \
- return __VA_ARGS__; \
- } \
+ return __VA_ARGS__; \
+ } \
} while (0)
namespace media {
@@ -25,10 +25,11 @@ OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
callback_(NULL),
player_(NULL),
simple_buffer_queue_(NULL),
- active_queue_(0),
+ active_buffer_index_(0),
buffer_size_bytes_(0),
started_(false),
volume_(1.0) {
+ DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream()";
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
// Provides sampling rate in milliHertz to OpenSLES.
@@ -50,6 +51,8 @@ OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
}
OpenSLESOutputStream::~OpenSLESOutputStream() {
+ DVLOG(2) << "OpenSLESOutputStream::~OpenSLESOutputStream()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!engine_object_.Get());
DCHECK(!player_object_.Get());
DCHECK(!output_mixer_.Get());
@@ -59,6 +62,8 @@ OpenSLESOutputStream::~OpenSLESOutputStream() {
}
bool OpenSLESOutputStream::Open() {
+ DVLOG(2) << "OpenSLESOutputStream::Open()";
+ DCHECK(thread_checker_.CalledOnValidThread());
if (engine_object_.Get())
return false;
@@ -66,37 +71,46 @@ bool OpenSLESOutputStream::Open() {
return false;
SetupAudioBuffer();
+ active_buffer_index_ = 0;
return true;
}
void OpenSLESOutputStream::Start(AudioSourceCallback* callback) {
+ DVLOG(2) << "OpenSLESOutputStream::Start()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(callback);
DCHECK(player_);
DCHECK(simple_buffer_queue_);
if (started_)
return;
- // Enable the flags before streaming.
+ base::AutoLock lock(lock_);
+ DCHECK(callback_ == NULL || callback_ == callback);
callback_ = callback;
- active_queue_ = 0;
- started_ = true;
// Avoid start-up glitches by filling up one buffer queue before starting
// the stream.
- FillBufferQueue();
+ FillBufferQueueNoLock();
- // Start streaming data by setting the play state to |SL_PLAYSTATE_PLAYING|.
+ // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
+ // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
+ // state, adding buffers will implicitly start playback.
LOG_ON_FAILURE_AND_RETURN(
(*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING));
+
+ started_ = true;
}
void OpenSLESOutputStream::Stop() {
+ DVLOG(2) << "OpenSLESOutputStream::Stop()";
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!started_)
return;
- started_ = false;
- // Stop playing by setting the play state to |SL_PLAYSTATE_STOPPED|.
+ base::AutoLock lock(lock_);
+
+ // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
LOG_ON_FAILURE_AND_RETURN(
(*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED));
@@ -104,26 +118,48 @@ void OpenSLESOutputStream::Stop() {
// resuming playing.
LOG_ON_FAILURE_AND_RETURN(
(*simple_buffer_queue_)->Clear(simple_buffer_queue_));
+
+#ifndef NDEBUG
+ // Verify that the buffer queue is in fact cleared as it should.
+ SLAndroidSimpleBufferQueueState buffer_queue_state;
+ LOG_ON_FAILURE_AND_RETURN((*simple_buffer_queue_)->GetState(
+ simple_buffer_queue_, &buffer_queue_state));
+ DCHECK_EQ(0u, buffer_queue_state.count);
+ DCHECK_EQ(0u, buffer_queue_state.index);
+#endif
+
+ started_ = false;
}
void OpenSLESOutputStream::Close() {
+ DVLOG(2) << "OpenSLESOutputStream::Close()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+
// Stop the stream if it is still playing.
Stop();
-
- // Explicitly free the player objects and invalidate their associated
- // interfaces. They have to be done in the correct order.
- player_object_.Reset();
- output_mixer_.Reset();
- engine_object_.Reset();
- simple_buffer_queue_ = NULL;
- player_ = NULL;
-
- ReleaseAudioBuffer();
+ {
+ // Destroy the buffer queue player object and invalidate all associated
+ // interfaces.
+ player_object_.Reset();
+ simple_buffer_queue_ = NULL;
+ player_ = NULL;
+
+ // Destroy the mixer object. We don't store any associated interface for
+ // this object.
+ output_mixer_.Reset();
+
+ // Destroy the engine object. We don't store any associated interface for
+ // this object.
+ engine_object_.Reset();
+ ReleaseAudioBuffer();
+ }
audio_manager_->ReleaseOutputStream(this);
}
void OpenSLESOutputStream::SetVolume(double volume) {
+ DVLOG(2) << "OpenSLESOutputStream::SetVolume(" << volume << ")";
+ DCHECK(thread_checker_.CalledOnValidThread());
float volume_float = static_cast<float>(volume);
if (volume_float < 0.0f || volume_float > 1.0f) {
return;
@@ -132,70 +168,61 @@ void OpenSLESOutputStream::SetVolume(double volume) {
}
void OpenSLESOutputStream::GetVolume(double* volume) {
+ DCHECK(thread_checker_.CalledOnValidThread());
*volume = static_cast<double>(volume_);
}
bool OpenSLESOutputStream::CreatePlayer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!engine_object_.Get());
+ DCHECK(!player_object_.Get());
+ DCHECK(!output_mixer_.Get());
+ DCHECK(!player_);
+ DCHECK(!simple_buffer_queue_);
+
// Initializes the engine object with specific option. After working with the
// object, we need to free the object and its resources.
SLEngineOption option[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
- };
+ {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
LOG_ON_FAILURE_AND_RETURN(
slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL),
false);
// Realize the SL engine object in synchronous mode.
LOG_ON_FAILURE_AND_RETURN(
- engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE),
- false);
+ engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE), false);
// Get the SL engine interface which is implicit.
SLEngineItf engine;
- LOG_ON_FAILURE_AND_RETURN(
- engine_object_->GetInterface(engine_object_.Get(),
- SL_IID_ENGINE,
- &engine),
- false);
+ LOG_ON_FAILURE_AND_RETURN(engine_object_->GetInterface(
+ engine_object_.Get(), SL_IID_ENGINE, &engine),
+ false);
// Create ouput mixer object to be used by the player.
- LOG_ON_FAILURE_AND_RETURN(
- (*engine)->CreateOutputMix(engine,
- output_mixer_.Receive(),
- 0,
- NULL,
- NULL),
- false);
+ LOG_ON_FAILURE_AND_RETURN((*engine)->CreateOutputMix(
+ engine, output_mixer_.Receive(), 0, NULL, NULL),
+ false);
// Realizing the output mix object in synchronous mode.
LOG_ON_FAILURE_AND_RETURN(
- output_mixer_->Realize(output_mixer_.Get(), SL_BOOLEAN_FALSE),
- false);
+ output_mixer_->Realize(output_mixer_.Get(), SL_BOOLEAN_FALSE), false);
// Audio source configuration.
SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(kNumOfQueuesInBuffer)
- };
- SLDataSource audio_source = { &simple_buffer_queue, &format_ };
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kMaxNumOfBuffersInQueue)};
+ SLDataSource audio_source = {&simple_buffer_queue, &format_};
// Audio sink configuration.
- SLDataLocator_OutputMix locator_output_mix = {
- SL_DATALOCATOR_OUTPUTMIX, output_mixer_.Get()
- };
- SLDataSink audio_sink = { &locator_output_mix, NULL };
+ SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
+ output_mixer_.Get()};
+ SLDataSink audio_sink = {&locator_output_mix, NULL};
// Create an audio player.
- const SLInterfaceID interface_id[] = {
- SL_IID_BUFFERQUEUE,
- SL_IID_VOLUME,
- SL_IID_ANDROIDCONFIGURATION
- };
- const SLboolean interface_required[] = {
- SL_BOOLEAN_TRUE,
- SL_BOOLEAN_TRUE,
- SL_BOOLEAN_TRUE
- };
+ const SLInterfaceID interface_id[] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME,
+ SL_IID_ANDROIDCONFIGURATION};
+ const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+ SL_BOOLEAN_TRUE};
LOG_ON_FAILURE_AND_RETURN(
(*engine)->CreateAudioPlayer(engine,
player_object_.Receive(),
@@ -209,22 +236,21 @@ bool OpenSLESOutputStream::CreatePlayer() {
// Create AudioPlayer and specify SL_IID_ANDROIDCONFIGURATION.
SLAndroidConfigurationItf player_config;
LOG_ON_FAILURE_AND_RETURN(
- player_object_->GetInterface(player_object_.Get(),
- SL_IID_ANDROIDCONFIGURATION,
- &player_config),
+ player_object_->GetInterface(
+ player_object_.Get(), SL_IID_ANDROIDCONFIGURATION, &player_config),
false);
SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
LOG_ON_FAILURE_AND_RETURN(
(*player_config)->SetConfiguration(player_config,
SL_ANDROID_KEY_STREAM_TYPE,
- &stream_type, sizeof(SLint32)),
+ &stream_type,
+ sizeof(SLint32)),
false);
// Realize the player object in synchronous mode.
LOG_ON_FAILURE_AND_RETURN(
- player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE),
- false);
+ player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);
// Get an implicit player interface.
LOG_ON_FAILURE_AND_RETURN(
@@ -233,72 +259,104 @@ bool OpenSLESOutputStream::CreatePlayer() {
// Get the simple buffer queue interface.
LOG_ON_FAILURE_AND_RETURN(
- player_object_->GetInterface(player_object_.Get(),
- SL_IID_BUFFERQUEUE,
- &simple_buffer_queue_),
+ player_object_->GetInterface(
+ player_object_.Get(), SL_IID_BUFFERQUEUE, &simple_buffer_queue_),
false);
// Register the input callback for the simple buffer queue.
// This callback will be called when the soundcard needs data.
LOG_ON_FAILURE_AND_RETURN(
- (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
- SimpleBufferQueueCallback,
- this),
+ (*simple_buffer_queue_)->RegisterCallback(
+ simple_buffer_queue_, SimpleBufferQueueCallback, this),
false);
return true;
}
void OpenSLESOutputStream::SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* instance) {
OpenSLESOutputStream* stream =
reinterpret_cast<OpenSLESOutputStream*>(instance);
stream->FillBufferQueue();
}
void OpenSLESOutputStream::FillBufferQueue() {
+ base::AutoLock lock(lock_);
if (!started_)
return;
TRACE_EVENT0("audio", "OpenSLESOutputStream::FillBufferQueue");
+
+ // Verify that we are in a playing state.
+ SLuint32 state;
+ SLresult err = (*player_)->GetPlayState(player_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ HandleError(err);
+ return;
+ }
+ if (state != SL_PLAYSTATE_PLAYING) {
+ DLOG(WARNING) << "Received callback in non-playing state";
+ return;
+ }
+
+ // Fill up one buffer in the queue by asking the registered source for
+ // data using the OnMoreData() callback.
+ FillBufferQueueNoLock();
+}
+
+void OpenSLESOutputStream::FillBufferQueueNoLock() {
+ // Ensure that the calling thread has acquired the lock since it is not
+ // done in this method.
+ lock_.AssertAcquired();
+
// Read data from the registered client source.
- // TODO(xians): Get an accurate delay estimation.
- uint32 hardware_delay = buffer_size_bytes_;
+ // TODO(henrika): Investigate if it is possible to get a more accurate
+ // delay estimation.
+ const uint32 hardware_delay = buffer_size_bytes_;
int frames_filled = callback_->OnMoreData(
audio_bus_.get(), AudioBuffersState(0, hardware_delay));
- if (frames_filled <= 0)
- return; // Audio source is shutting down, or halted on error.
- int num_filled_bytes =
+ if (frames_filled <= 0) {
+ // Audio source is shutting down, or halted on error.
+ return;
+ }
+
+ // Note: If the internal representation ever changes from 16-bit PCM to
+ // raw float, the data must be clipped and sanitized since it may come
+ // from an untrusted source such as NaCl.
+ audio_bus_->Scale(volume_);
+ audio_bus_->ToInterleaved(frames_filled,
+ format_.bitsPerSample / 8,
+ audio_data_[active_buffer_index_]);
+
+ const int num_filled_bytes =
frames_filled * audio_bus_->channels() * format_.bitsPerSample / 8;
DCHECK_LE(static_cast<size_t>(num_filled_bytes), buffer_size_bytes_);
- // Note: If this ever changes to output raw float the data must be clipped and
- // sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->Scale(volume_);
- audio_bus_->ToInterleaved(
- frames_filled, format_.bitsPerSample / 8, audio_data_[active_queue_]);
// Enqueue the buffer for playback.
- SLresult err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[active_queue_],
- num_filled_bytes);
+ SLresult err =
+ (*simple_buffer_queue_)->Enqueue(simple_buffer_queue_,
+ audio_data_[active_buffer_index_],
+ num_filled_bytes);
if (SL_RESULT_SUCCESS != err)
HandleError(err);
- active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
+ active_buffer_index_ = (active_buffer_index_ + 1) % kMaxNumOfBuffersInQueue;
}
void OpenSLESOutputStream::SetupAudioBuffer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!audio_data_[0]);
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+ for (int i = 0; i < kMaxNumOfBuffersInQueue; ++i) {
audio_data_[i] = new uint8[buffer_size_bytes_];
}
}
void OpenSLESOutputStream::ReleaseAudioBuffer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (audio_data_[0]) {
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- delete [] audio_data_[i];
+ for (int i = 0; i < kMaxNumOfBuffersInQueue; ++i) {
+ delete[] audio_data_[i];
audio_data_[i] = NULL;
}
}
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index f505b5165cd..7232d5da5f7 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -9,6 +9,8 @@
#include <SLES/OpenSLES_Android.h>
#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/android/opensles_util.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -18,9 +20,12 @@ namespace media {
class AudioManagerAndroid;
// Implements PCM audio output support for Android using the OpenSLES API.
+// This class is created and lives on the Audio Manager thread, but audio
+// buffers are requested from us on an internal OpenSLES audio thread.
+// All public methods should be called on the Audio Manager thread.
class OpenSLESOutputStream : public AudioOutputStream {
public:
- static const int kNumOfQueuesInBuffer = 2;
+ static const int kMaxNumOfBuffersInQueue = 2;
OpenSLESOutputStream(AudioManagerAndroid* manager,
const AudioParameters& params);
@@ -38,11 +43,18 @@ class OpenSLESOutputStream : public AudioOutputStream {
private:
bool CreatePlayer();
+ // Called from OpenSLES specific audio worker thread.
static void SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
+ SLAndroidSimpleBufferQueueItf buffer_queue,
+ void* instance);
+ // Fills up one buffer by asking the registered source for data.
+ // Called from OpenSLES specific audio worker thread.
void FillBufferQueue();
+  // Called on the audio manager thread (Start()) and on the OpenSLES audio
+ void FillBufferQueueNoLock();
+
// Called in Open();
void SetupAudioBuffer();
@@ -53,6 +65,12 @@ class OpenSLESOutputStream : public AudioOutputStream {
// the attached AudioOutputCallback::OnError().
void HandleError(SLresult error);
+ base::ThreadChecker thread_checker_;
+
+ // Protects |callback_|, |active_buffer_index_|, |audio_data_|,
+ // |buffer_size_bytes_| and |simple_buffer_queue_|.
+ base::Lock lock_;
+
AudioManagerAndroid* audio_manager_;
AudioSourceCallback* callback_;
@@ -69,10 +87,11 @@ class OpenSLESOutputStream : public AudioOutputStream {
SLDataFormat_PCM format_;
- // Audio buffer arrays that are allocated in the constructor.
- uint8* audio_data_[kNumOfQueuesInBuffer];
+ // Audio buffers that are allocated in the constructor based on
+ // info from audio parameters.
+ uint8* audio_data_[kMaxNumOfBuffersInQueue];
- int active_queue_;
+ int active_buffer_index_;
size_t buffer_size_bytes_;
bool started_;
@@ -88,4 +107,4 @@ class OpenSLESOutputStream : public AudioOutputStream {
} // namespace media
-#endif // MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
+#endif // MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
diff --git a/chromium/media/audio/android/opensles_wrapper.cc b/chromium/media/audio/android/opensles_wrapper.cc
new file mode 100644
index 00000000000..b8f9ea45e4d
--- /dev/null
+++ b/chromium/media/audio/android/opensles_wrapper.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the symbols from OpenSLES that Android uses. It then
+// loads the library dynamically on first use.
+
+// The OpenSLES API uses constants as part of the API. This file defines
+// proxies for those constants and redefines them when the library is first
+// loaded. For this, it needs to be able to change their content and so imports
+// the headers without const. This is correct because OpenSLES.h is a C API.
+#define const
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#undef const
+
+#include "base/basictypes.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/native_library.h"
+
+// The constants used in chromium. SLInterfaceID is actually a pointer to
+// SLInterfaceID_. Those symbols are defined as extern symbols in the OpenSLES
+// headers. They will be initialized to their correct values when the library is
+// loaded.
+SLInterfaceID SL_IID_ENGINE = NULL;
+SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE = NULL;
+SLInterfaceID SL_IID_ANDROIDCONFIGURATION = NULL;
+SLInterfaceID SL_IID_RECORD = NULL;
+SLInterfaceID SL_IID_BUFFERQUEUE = NULL;
+SLInterfaceID SL_IID_VOLUME = NULL;
+SLInterfaceID SL_IID_PLAY = NULL;
+
+namespace {
+
+// The name of the library to load.
+const char kOpenSLLibraryName[] = "libOpenSLES.so";
+
+// Loads the OpenSLES library, and initializes all the proxies.
+base::NativeLibrary IntializeLibraryHandle() {
+ base::NativeLibrary handle =
+ base::LoadNativeLibrary(base::FilePath(kOpenSLLibraryName), NULL);
+ DCHECK(handle) << "Unable to load " << kOpenSLLibraryName;
+
+ // Setup the proxy for each symbol.
+ // Attach the symbol name to the proxy address.
+ struct SymbolDefinition {
+ const char* name;
+ SLInterfaceID* sl_iid;
+ };
+
+ // The list of defined symbols.
+ const SymbolDefinition kSymbols[] = {
+ {"SL_IID_ENGINE", &SL_IID_ENGINE},
+ {"SL_IID_ANDROIDSIMPLEBUFFERQUEUE", &SL_IID_ANDROIDSIMPLEBUFFERQUEUE},
+ {"SL_IID_ANDROIDCONFIGURATION", &SL_IID_ANDROIDCONFIGURATION},
+ {"SL_IID_RECORD", &SL_IID_RECORD},
+ {"SL_IID_BUFFERQUEUE", &SL_IID_BUFFERQUEUE},
+ {"SL_IID_VOLUME", &SL_IID_VOLUME},
+ {"SL_IID_PLAY", &SL_IID_PLAY}
+ };
+
+ for (size_t i = 0; i < sizeof(kSymbols) / sizeof(kSymbols[0]); ++i) {
+ memcpy(kSymbols[i].sl_iid,
+ base::GetFunctionPointerFromNativeLibrary(handle, kSymbols[i].name),
+ sizeof(SLInterfaceID));
+ DCHECK(*kSymbols[i].sl_iid) << "Unable to find symbol for "
+ << kSymbols[i].name;
+ }
+ return handle;
+}
+
+// Returns the handle to the shared library. The library itself will be lazily
+// loaded during the first call to this function.
+base::NativeLibrary LibraryHandle() {
+ // The handle is lazily initialized on the first call.
+ static base::NativeLibrary g_opensles_LibraryHandle =
+ IntializeLibraryHandle();
+ return g_opensles_LibraryHandle;
+}
+
+} // namespace
+
+// Redefine slCreateEngine symbol.
+SLresult slCreateEngine(SLObjectItf* engine,
+ SLuint32 num_options,
+ SLEngineOption* engine_options,
+ SLuint32 num_interfaces,
+ SLInterfaceID* interface_ids,
+ SLboolean* interfaces_required) {
+ typedef SLresult (*SlCreateEngineSignature)(SLObjectItf*,
+ SLuint32,
+ SLEngineOption*,
+ SLuint32,
+ SLInterfaceID*,
+ SLboolean*);
+ static SlCreateEngineSignature g_sl_create_engine_handle =
+ reinterpret_cast<SlCreateEngineSignature>(
+ base::GetFunctionPointerFromNativeLibrary(LibraryHandle(),
+ "slCreateEngine"));
+ DCHECK(g_sl_create_engine_handle)
+ << "Unable to find symbol for slCreateEngine";
+ return g_sl_create_engine_handle(engine,
+ num_options,
+ engine_options,
+ num_interfaces,
+ interface_ids,
+ interfaces_required);
+}
diff --git a/chromium/media/audio/async_socket_io_handler.h b/chromium/media/audio/async_socket_io_handler.h
deleted file mode 100644
index cc7185eb243..00000000000
--- a/chromium/media/audio/async_socket_io_handler.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
-#define MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
-
-#include "base/message_loop/message_loop.h"
-#include "base/sync_socket.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// The message loop callback interface is different based on platforms.
-#if defined(OS_WIN)
-typedef base::MessageLoopForIO::IOHandler MessageLoopIOHandler;
-#elif defined(OS_POSIX)
-typedef base::MessageLoopForIO::Watcher MessageLoopIOHandler;
-#endif
-
-// Extends the CancelableSyncSocket class to allow reading from a socket
-// asynchronously on a TYPE_IO message loop thread. This makes it easy to share
-// a thread that uses a message loop (e.g. for IPC and other things) and not
-// require a separate thread to read from the socket.
-//
-// Example usage (also see the unit tests):
-//
-// class SocketReader {
-// public:
-// SocketReader(base::CancelableSyncSocket* socket)
-// : socket_(socket), buffer_() {
-// io_handler.Initialize(socket_->handle(),
-// base::Bind(&SocketReader::OnDataAvailable,
-// base::Unretained(this));
-// }
-//
-// void AsyncRead() {
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-//
-// private:
-// void OnDataAvailable(int bytes_read) {
-// if (ProcessData(&buffer_[0], bytes_read)) {
-// // Issue another read.
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-// }
-//
-// media::AsyncSocketIoHandler io_handler;
-// base::CancelableSyncSocket* socket_;
-// char buffer_[kBufferSize];
-// };
-//
-class MEDIA_EXPORT AsyncSocketIoHandler
- : public NON_EXPORTED_BASE(base::NonThreadSafe),
- public NON_EXPORTED_BASE(MessageLoopIOHandler) {
- public:
- AsyncSocketIoHandler();
- virtual ~AsyncSocketIoHandler();
-
- // Type definition for the callback. The parameter tells how many
- // bytes were read and is 0 if an error occurred.
- typedef base::Callback<void(int)> ReadCompleteCallback;
-
- // Initializes the AsyncSocketIoHandler by hooking it up to the current
- // thread's message loop (must be TYPE_IO), to do async reads from the socket
- // on the current thread. The |callback| will be invoked whenever a Read()
- // has completed.
- bool Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback);
-
- // Attempts to read from the socket. The return value will be |false|
- // if an error occurred and |true| if data was read or a pending read
- // was issued. Regardless of async or sync operation, the
- // ReadCompleteCallback (see above) will be called when data is available.
- bool Read(char* buffer, int buffer_len);
-
- private:
-#if defined(OS_WIN)
- // Implementation of IOHandler on Windows.
- virtual void OnIOCompleted(base::MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) OVERRIDE;
-#elif defined(OS_POSIX)
- // Implementation of base::MessageLoopForIO::Watcher.
- virtual void OnFileCanWriteWithoutBlocking(int socket) OVERRIDE {}
- virtual void OnFileCanReadWithoutBlocking(int socket) OVERRIDE;
-
- void EnsureWatchingSocket();
-#endif
-
- base::SyncSocket::Handle socket_;
-#if defined(OS_WIN)
- base::MessageLoopForIO::IOContext* context_;
- bool is_pending_;
-#elif defined(OS_POSIX)
- base::MessageLoopForIO::FileDescriptorWatcher socket_watcher_;
- // |pending_buffer_| and |pending_buffer_len_| are valid only between
- // Read() and OnFileCanReadWithoutBlocking().
- char* pending_buffer_;
- int pending_buffer_len_;
- // |true| iff the message loop is watching the socket for IO events.
- bool is_watching_;
-#endif
- ReadCompleteCallback read_complete_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncSocketIoHandler);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
diff --git a/chromium/media/audio/async_socket_io_handler_posix.cc b/chromium/media/audio/async_socket_io_handler_posix.cc
deleted file mode 100644
index be8f3708cb7..00000000000
--- a/chromium/media/audio/async_socket_io_handler_posix.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-#include <fcntl.h>
-
-#include "base/posix/eintr_wrapper.h"
-
-namespace media {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- pending_buffer_(NULL),
- pending_buffer_len_(0),
- is_watching_(false) {
-}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- DCHECK(CalledOnValidThread());
-}
-
-void AsyncSocketIoHandler::OnFileCanReadWithoutBlocking(int socket) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(socket, socket_);
- DCHECK(!read_complete_.is_null());
-
- if (pending_buffer_) {
- int bytes_read = HANDLE_EINTR(read(socket_, pending_buffer_,
- pending_buffer_len_));
- DCHECK_GE(bytes_read, 0);
- pending_buffer_ = NULL;
- pending_buffer_len_ = 0;
- read_complete_.Run(bytes_read > 0 ? bytes_read : 0);
- } else {
- // We're getting notifications that we can read from the socket while
- // we're not waiting for data. In order to not starve the message loop,
- // let's stop watching the fd and restart the watch when Read() is called.
- is_watching_ = false;
- socket_watcher_.StopWatchingFileDescriptor();
- }
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!pending_buffer_);
-
- EnsureWatchingSocket();
-
- int bytes_read = HANDLE_EINTR(read(socket_, buffer, buffer_len));
- if (bytes_read < 0) {
- if (errno == EAGAIN) {
- pending_buffer_ = buffer;
- pending_buffer_len_ = buffer_len;
- } else {
- NOTREACHED() << "read(): " << errno;
- return false;
- }
- } else {
- read_complete_.Run(bytes_read);
- }
- return true;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- // SyncSocket is blocking by default, so let's convert it to non-blocking.
- int value = fcntl(socket, F_GETFL);
- if (!(value & O_NONBLOCK)) {
- // Set the socket to be non-blocking so we can do async reads.
- if (fcntl(socket, F_SETFL, O_NONBLOCK) == -1) {
- NOTREACHED();
- return false;
- }
- }
-
- return true;
-}
-
-void AsyncSocketIoHandler::EnsureWatchingSocket() {
- DCHECK(CalledOnValidThread());
- if (!is_watching_ && socket_ != base::SyncSocket::kInvalidHandle) {
- is_watching_ = base::MessageLoopForIO::current()->WatchFileDescriptor(
- socket_, true, base::MessageLoopForIO::WATCH_READ,
- &socket_watcher_, this);
- }
-}
-
-} // namespace media.
diff --git a/chromium/media/audio/async_socket_io_handler_unittest.cc b/chromium/media/audio/async_socket_io_handler_unittest.cc
deleted file mode 100644
index ae971464dbc..00000000000
--- a/chromium/media/audio/async_socket_io_handler_unittest.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-#include "base/bind.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-const char kAsyncSocketIoTestString[] = "Hello, AsyncSocketIoHandler";
-const size_t kAsyncSocketIoTestStringLength =
- arraysize(kAsyncSocketIoTestString);
-
-class TestSocketReader {
- public:
- // Set |number_of_reads_before_quit| to >0 when you expect a specific number
- // of Read operations to complete. Once that number is reached, the current
- // message loop will be Quit(). Set |number_of_reads_before_quit| to -1 if
- // callbacks should not be counted.
- TestSocketReader(base::CancelableSyncSocket* socket,
- int number_of_reads_before_quit,
- bool issue_reads_from_callback,
- bool expect_eof)
- : socket_(socket), buffer_(),
- number_of_reads_before_quit_(number_of_reads_before_quit),
- callbacks_received_(0),
- issue_reads_from_callback_(issue_reads_from_callback),
- expect_eof_(expect_eof) {
- io_handler.Initialize(socket_->handle(),
- base::Bind(&TestSocketReader::OnRead,
- base::Unretained(this)));
- }
- ~TestSocketReader() {}
-
- bool IssueRead() {
- return io_handler.Read(&buffer_[0], sizeof(buffer_));
- }
-
- const char* buffer() const { return &buffer_[0]; }
-
- int callbacks_received() const { return callbacks_received_; }
-
- private:
- void OnRead(int bytes_read) {
- if (!expect_eof_) {
- EXPECT_GT(bytes_read, 0);
- } else {
- EXPECT_GE(bytes_read, 0);
- }
- ++callbacks_received_;
- if (number_of_reads_before_quit_ == callbacks_received_) {
- base::MessageLoop::current()->Quit();
- } else if (issue_reads_from_callback_) {
- IssueRead();
- }
- }
-
- media::AsyncSocketIoHandler io_handler;
- base::CancelableSyncSocket* socket_; // Ownership lies outside the class.
- char buffer_[kAsyncSocketIoTestStringLength];
- int number_of_reads_before_quit_;
- int callbacks_received_;
- bool issue_reads_from_callback_;
- bool expect_eof_;
-};
-
-// Workaround to be able to use a base::Closure for sending data.
-// Send() returns int but a closure must return void.
-void SendData(base::CancelableSyncSocket* socket,
- const void* buffer,
- size_t length) {
- socket->Send(buffer, length);
-}
-
-} // end namespace.
-
-// Tests doing a pending read from a socket and use an IO handler to get
-// notified of data.
-TEST(AsyncSocketIoHandlerTest, AsynchronousReadWithMessageLoop) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], 1, false, false);
- EXPECT_TRUE(reader.IssueRead());
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- base::MessageLoop::current()->Run();
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Tests doing a read from a socket when we know that there is data in the
-// socket. Here we want to make sure that any async 'can read' notifications
-// won't trip us off and that the synchronous case works as well.
-TEST(AsyncSocketIoHandlerTest, SynchronousReadWithMessageLoop) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], -1, false, false);
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
- base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100));
- base::MessageLoop::current()->Run();
-
- EXPECT_TRUE(reader.IssueRead());
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- // We've now verified that the read happened synchronously, but it's not
- // guaranteed that the callback has been issued since the callback will be
- // called asynchronously even though the read may have been done.
- // So we call RunUntilIdle() to allow any event notifications or APC's on
- // Windows, to execute before checking the count of how many callbacks we've
- // received.
- base::MessageLoop::current()->RunUntilIdle();
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Calls Read() from within a callback to test that simple read "loops" work.
-TEST(AsyncSocketIoHandlerTest, ReadFromCallback) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- const int kReadOperationCount = 10;
- TestSocketReader reader(&pair[0], kReadOperationCount, true, false);
- EXPECT_TRUE(reader.IssueRead());
-
- // Issue sends on an interval to satisfy the Read() requirements.
- int64 milliseconds = 0;
- for (int i = 0; i < kReadOperationCount; ++i) {
- base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
- base::Bind(&SendData, &pair[1], kAsyncSocketIoTestString,
- kAsyncSocketIoTestStringLength),
- base::TimeDelta::FromMilliseconds(milliseconds));
- milliseconds += 10;
- }
-
- base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
- base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100 + milliseconds));
-
- base::MessageLoop::current()->Run();
- EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
-}
-
-// Calls Read() then close other end, check that a correct callback is received.
-TEST(AsyncSocketIoHandlerTest, ReadThenClose) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- const int kReadOperationCount = 1;
- TestSocketReader reader(&pair[0], kReadOperationCount, false, true);
- EXPECT_TRUE(reader.IssueRead());
-
- pair[1].Close();
-
- base::MessageLoop::current()->Run();
- EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
-}
diff --git a/chromium/media/audio/async_socket_io_handler_win.cc b/chromium/media/audio/async_socket_io_handler_win.cc
deleted file mode 100644
index ea6bd4ad0d5..00000000000
--- a/chromium/media/audio/async_socket_io_handler_win.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-namespace media {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- context_(NULL),
- is_pending_(false) {}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- // We need to be deleted on the correct thread to avoid racing with the
- // message loop thread.
- DCHECK(CalledOnValidThread());
-
- if (context_) {
- if (is_pending_) {
- // Make the context be deleted by the message pump when done.
- context_->handler = NULL;
- } else {
- delete context_;
- }
- }
-}
-
-// Implementation of IOHandler on Windows.
-void AsyncSocketIoHandler::OnIOCompleted(
- base::MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(context_, context);
- DCHECK(!read_complete_.is_null());
- is_pending_ = false;
- read_complete_.Run(error == ERROR_SUCCESS ? bytes_transfered : 0);
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!is_pending_);
- DCHECK_NE(socket_, base::SyncSocket::kInvalidHandle);
-
- DWORD bytes_read = 0;
- BOOL ok = ::ReadFile(socket_, buffer, buffer_len, &bytes_read,
- &context_->overlapped);
- // The completion port will be signaled regardless of completing the read
- // straight away or asynchronously (ERROR_IO_PENDING). OnIOCompleted() will
- // be called regardless and we don't need to explicitly run the callback
- // in the case where ok is FALSE and GLE==ERROR_IO_PENDING.
- is_pending_ = !ok && (GetLastError() == ERROR_IO_PENDING);
- return ok || is_pending_;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK(!context_);
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- base::MessageLoopForIO::current()->RegisterIOHandler(socket, this);
-
- context_ = new base::MessageLoopForIO::IOContext();
- context_->handler = this;
- memset(&context_->overlapped, 0, sizeof(context_->overlapped));
-
- return true;
-}
-
-} // namespace media.
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
index 31e137e2b17..ef94d1274d6 100644
--- a/chromium/media/audio/audio_input_controller.cc
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -8,6 +8,7 @@
#include "base/threading/thread_restrictions.h"
#include "media/base/limits.h"
#include "media/base/scoped_histogram_timer.h"
+#include "media/base/user_input_monitor.h"
namespace {
const int kMaxInputChannels = 2;
@@ -18,16 +19,10 @@ const int kMaxInputChannels = 2;
// breakage (very hard to repro bugs!) on other platforms: See
// http://crbug.com/226327 and http://crbug.com/230972.
const int kTimerResetIntervalSeconds = 1;
-#if defined(OS_IOS)
-// The first callback on iOS is received after the current background
-// audio has faded away.
-const int kTimerInitialIntervalSeconds = 4;
-#else
// We have received reports that the timer can be too trigger happy on some
// Mac devices and the initial timer interval has therefore been increased
// from 1 second to 5 seconds.
const int kTimerInitialIntervalSeconds = 5;
-#endif // defined(OS_IOS)
}
namespace media {
@@ -36,14 +31,17 @@ namespace media {
AudioInputController::Factory* AudioInputController::factory_ = NULL;
AudioInputController::AudioInputController(EventHandler* handler,
- SyncWriter* sync_writer)
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor)
: creator_loop_(base::MessageLoopProxy::current()),
handler_(handler),
stream_(NULL),
data_is_active_(false),
state_(kEmpty),
sync_writer_(sync_writer),
- max_volume_(0.0) {
+ max_volume_(0.0),
+ user_input_monitor_(user_input_monitor),
+ prev_key_down_count_(0) {
DCHECK(creator_loop_.get());
}
@@ -56,17 +54,19 @@ scoped_refptr<AudioInputController> AudioInputController::Create(
AudioManager* audio_manager,
EventHandler* event_handler,
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ UserInputMonitor* user_input_monitor) {
DCHECK(audio_manager);
if (!params.IsValid() || (params.channels() > kMaxInputChannels))
return NULL;
- if (factory_)
- return factory_->Create(audio_manager, event_handler, params);
-
- scoped_refptr<AudioInputController> controller(new AudioInputController(
- event_handler, NULL));
+ if (factory_) {
+ return factory_->Create(
+ audio_manager, event_handler, params, user_input_monitor);
+ }
+ scoped_refptr<AudioInputController> controller(
+ new AudioInputController(event_handler, NULL, user_input_monitor));
controller->message_loop_ = audio_manager->GetMessageLoop();
@@ -87,7 +87,8 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
EventHandler* event_handler,
const AudioParameters& params,
const std::string& device_id,
- SyncWriter* sync_writer) {
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor) {
DCHECK(audio_manager);
DCHECK(sync_writer);
@@ -96,8 +97,8 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
// Create the AudioInputController object and ensure that it runs on
// the audio-manager thread.
- scoped_refptr<AudioInputController> controller(new AudioInputController(
- event_handler, sync_writer));
+ scoped_refptr<AudioInputController> controller(
+ new AudioInputController(event_handler, sync_writer, user_input_monitor));
controller->message_loop_ = audio_manager->GetMessageLoop();
// Create and open a new audio input stream from the existing
@@ -116,14 +117,15 @@ scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
EventHandler* event_handler,
AudioInputStream* stream,
- SyncWriter* sync_writer) {
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor) {
DCHECK(sync_writer);
DCHECK(stream);
// Create the AudioInputController object and ensure that it runs on
// the audio-manager thread.
- scoped_refptr<AudioInputController> controller(new AudioInputController(
- event_handler, sync_writer));
+ scoped_refptr<AudioInputController> controller(
+ new AudioInputController(event_handler, sync_writer, user_input_monitor));
controller->message_loop_ = message_loop;
// TODO(miu): See TODO at top of file. Until that's resolved, we need to
@@ -171,7 +173,7 @@ void AudioInputController::DoCreate(AudioManager* audio_manager,
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CreateTime");
// TODO(miu): See TODO at top of file. Until that's resolved, assume all
// platform audio input requires the |no_data_timer_| be used to auto-detect
- // errors. In reality, probably only Windows and IOS need to be treated as
+ // errors. In reality, probably only Windows needs to be treated as
// unreliable here.
DoCreateForStream(audio_manager->MakeAudioInputStream(params, device_id),
true);
@@ -211,6 +213,11 @@ void AudioInputController::DoCreateForStream(
state_ = kCreated;
handler_->OnCreated(this);
+
+ if (user_input_monitor_) {
+ user_input_monitor_->EnableKeyPressMonitoring();
+ prev_key_down_count_ = user_input_monitor_->GetKeyPressCount();
+ }
}
void AudioInputController::DoRecord() {
@@ -251,6 +258,9 @@ void AudioInputController::DoClose() {
}
state_ = kClosed;
+
+ if (user_input_monitor_)
+ user_input_monitor_->DisableKeyPressMonitoring();
}
}
@@ -317,8 +327,10 @@ void AudioInputController::DoCheckForNoData() {
base::Unretained(this)));
}
-void AudioInputController::OnData(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes,
+void AudioInputController::OnData(AudioInputStream* stream,
+ const uint8* data,
+ uint32 size,
+ uint32 hardware_delay_bytes,
double volume) {
{
base::AutoLock auto_lock(lock_);
@@ -326,13 +338,21 @@ void AudioInputController::OnData(AudioInputStream* stream, const uint8* data,
return;
}
+ bool key_pressed = false;
+ if (user_input_monitor_) {
+ size_t current_count = user_input_monitor_->GetKeyPressCount();
+ key_pressed = current_count != prev_key_down_count_;
+ prev_key_down_count_ = current_count;
+ DVLOG_IF(6, key_pressed) << "Detected keypress.";
+ }
+
// Mark data as active to ensure that the periodic calls to
// DoCheckForNoData() does not report an error to the event handler.
SetDataIsActive(true);
// Use SyncSocket if we are in a low-latency mode.
if (LowLatencyMode()) {
- sync_writer_->Write(data, size, volume);
+ sync_writer_->Write(data, size, volume, key_pressed);
sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
return;
}
@@ -354,7 +374,7 @@ void AudioInputController::OnError(AudioInputStream* stream) {
}
void AudioInputController::DoStopCloseAndClearStream(
- base::WaitableEvent *done) {
+ base::WaitableEvent* done) {
DCHECK(message_loop_->BelongsToCurrentThread());
// Allow calling unconditionally and bail if we don't have a stream to close.
diff --git a/chromium/media/audio/audio_input_controller.h b/chromium/media/audio/audio_input_controller.h
index 586d47703a1..6b40459ded6 100644
--- a/chromium/media/audio/audio_input_controller.h
+++ b/chromium/media/audio/audio_input_controller.h
@@ -72,6 +72,8 @@
//
namespace media {
+class UserInputMonitor;
+
class MEDIA_EXPORT AudioInputController
: public base::RefCountedThreadSafe<AudioInputController>,
public AudioInputStream::AudioInputCallback {
@@ -102,7 +104,10 @@ class MEDIA_EXPORT AudioInputController
// Write certain amount of data from |data|. This method returns
// number of written bytes.
- virtual uint32 Write(const void* data, uint32 size, double volume) = 0;
+ virtual uint32 Write(const void* data,
+ uint32 size,
+ double volume,
+ bool key_pressed) = 0;
// Close this synchronous writer.
virtual void Close() = 0;
@@ -110,11 +115,15 @@ class MEDIA_EXPORT AudioInputController
// AudioInputController::Create() can use the currently registered Factory
// to create the AudioInputController. Factory is intended for testing only.
+ // |user_input_monitor| is used for typing detection and can be NULL.
class Factory {
public:
- virtual AudioInputController* Create(AudioManager* audio_manager,
- EventHandler* event_handler,
- AudioParameters params) = 0;
+ virtual AudioInputController* Create(
+ AudioManager* audio_manager,
+ EventHandler* event_handler,
+ AudioParameters params,
+ UserInputMonitor* user_input_monitor) = 0;
+
protected:
virtual ~Factory() {}
};
@@ -123,11 +132,13 @@ class MEDIA_EXPORT AudioInputController
// The audio device will be created on the audio thread, and when that is
// done, the event handler will receive an OnCreated() call from that same
// thread. |device_id| is the unique ID of the audio device to be opened.
+ // |user_input_monitor| is used for typing detection and can be NULL.
static scoped_refptr<AudioInputController> Create(
AudioManager* audio_manager,
EventHandler* event_handler,
const AudioParameters& params,
- const std::string& device_id);
+ const std::string& device_id,
+ UserInputMonitor* user_input_monitor);
// Sets the factory used by the static method Create(). AudioInputController
// does not take ownership of |factory|. A value of NULL results in an
@@ -138,25 +149,28 @@ class MEDIA_EXPORT AudioInputController
// Factory method for creating an AudioInputController for low-latency mode.
// The audio device will be created on the audio thread, and when that is
// done, the event handler will receive an OnCreated() call from that same
- // thread.
+ // thread. |user_input_monitor| is used for typing detection and can be NULL.
static scoped_refptr<AudioInputController> CreateLowLatency(
AudioManager* audio_manager,
EventHandler* event_handler,
const AudioParameters& params,
const std::string& device_id,
// External synchronous writer for audio controller.
- SyncWriter* sync_writer);
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor);
// Factory method for creating an AudioInputController for low-latency mode,
// taking ownership of |stream|. The stream will be opened on the audio
// thread, and when that is done, the event handler will receive an
- // OnCreated() call from that same thread.
+ // OnCreated() call from that same thread. |user_input_monitor| is used for
+ // typing detection and can be NULL.
static scoped_refptr<AudioInputController> CreateForStream(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
EventHandler* event_handler,
AudioInputStream* stream,
// External synchronous writer for audio controller.
- SyncWriter* sync_writer);
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor);
// Starts recording using the created audio input stream.
// This method is called on the creator thread.
@@ -201,7 +215,9 @@ class MEDIA_EXPORT AudioInputController
kError
};
- AudioInputController(EventHandler* handler, SyncWriter* sync_writer);
+ AudioInputController(EventHandler* handler,
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor);
virtual ~AudioInputController();
// Methods called on the audio thread (owned by the AudioManager).
@@ -266,6 +282,10 @@ class MEDIA_EXPORT AudioInputController
double max_volume_;
+ UserInputMonitor* user_input_monitor_;
+
+ size_t prev_key_down_count_;
+
DISALLOW_COPY_AND_ASSIGN(AudioInputController);
};
diff --git a/chromium/media/audio/audio_input_controller_unittest.cc b/chromium/media/audio/audio_input_controller_unittest.cc
index b96ef3ad016..6388cbf975b 100644
--- a/chromium/media/audio/audio_input_controller_unittest.cc
+++ b/chromium/media/audio/audio_input_controller_unittest.cc
@@ -83,9 +83,13 @@ TEST_F(AudioInputControllerTest, CreateAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
kSampleRate, kBitsPerSample, kSamplesPerPacket);
+
scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params,
- AudioManagerBase::kDefaultDeviceId);
+ AudioInputController::Create(audio_manager.get(),
+ &event_handler,
+ params,
+ AudioManagerBase::kDefaultDeviceId,
+ NULL);
ASSERT_TRUE(controller.get());
// Wait for OnCreated() to fire.
@@ -120,8 +124,11 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
// Creating the AudioInputController should render an OnCreated() call.
scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params,
- AudioManagerBase::kDefaultDeviceId);
+ AudioInputController::Create(audio_manager.get(),
+ &event_handler,
+ params,
+ AudioManagerBase::kDefaultDeviceId,
+ NULL);
ASSERT_TRUE(controller.get());
// Start recording and trigger one OnRecording() call.
@@ -167,8 +174,11 @@ TEST_F(AudioInputControllerTest, RecordAndError) {
// Creating the AudioInputController should render an OnCreated() call.
scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params,
- AudioManagerBase::kDefaultDeviceId);
+ AudioInputController::Create(audio_manager.get(),
+ &event_handler,
+ params,
+ AudioManagerBase::kDefaultDeviceId,
+ NULL);
ASSERT_TRUE(controller.get());
// Start recording and trigger one OnRecording() call.
@@ -196,11 +206,17 @@ TEST_F(AudioInputControllerTest, SamplesPerPacketTooLarge) {
.Times(Exactly(0));
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket * 1000);
+ AudioParameters params(AudioParameters::AUDIO_FAKE,
+ kChannelLayout,
+ kSampleRate,
+ kBitsPerSample,
+ kSamplesPerPacket * 1000);
scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params,
- AudioManagerBase::kDefaultDeviceId);
+ AudioInputController::Create(audio_manager.get(),
+ &event_handler,
+ params,
+ AudioManagerBase::kDefaultDeviceId,
+ NULL);
ASSERT_FALSE(controller.get());
}
@@ -216,11 +232,17 @@ TEST_F(AudioInputControllerTest, CloseTwice) {
.Times(Exactly(1));
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
+ AudioParameters params(AudioParameters::AUDIO_FAKE,
+ kChannelLayout,
+ kSampleRate,
+ kBitsPerSample,
+ kSamplesPerPacket);
scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params,
- AudioManagerBase::kDefaultDeviceId);
+ AudioInputController::Create(audio_manager.get(),
+ &event_handler,
+ params,
+ AudioManagerBase::kDefaultDeviceId,
+ NULL);
ASSERT_TRUE(controller.get());
controller->Record();
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index 87fd57143cd..d7685840ecf 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -291,9 +291,12 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
ptr += current_segment_id_ * segment_length_;
AudioInputBuffer* buffer = reinterpret_cast<AudioInputBuffer*>(ptr);
- DCHECK_EQ(buffer->params.size,
+ // Usually this will be equal but in the case of low sample rate (e.g. 8kHz,
+ // the buffer may be bigger (on mac at least)).
+ DCHECK_GE(buffer->params.size,
segment_length_ - sizeof(AudioInputBufferParameters));
double volume = buffer->params.volume;
+ bool key_pressed = buffer->params.key_pressed;
int audio_delay_milliseconds = pending_data / bytes_per_ms_;
int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
@@ -308,8 +311,8 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
// Deliver captured data to the client in floating point format
// and update the audio-delay measurement.
- capture_callback_->Capture(audio_bus_.get(),
- audio_delay_milliseconds, volume);
+ capture_callback_->Capture(
+ audio_bus_.get(), audio_delay_milliseconds, volume, key_pressed);
}
} // namespace media
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
index 33729c45a04..a616761294d 100644
--- a/chromium/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -308,7 +308,8 @@ class AudioOutputStreamTraits {
static StreamType* CreateStream(AudioManager* audio_manager,
const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params, std::string());
+ return audio_manager->MakeAudioOutputStream(params, std::string(),
+ std::string());
}
};
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index cc5b95c8197..891d2a26589 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -58,11 +58,16 @@ class MEDIA_EXPORT AudioManager {
// threads to avoid blocking the rest of the application.
virtual void ShowAudioInputSettings() = 0;
- // Appends a list of available input devices. It is not guaranteed that
- // all the devices in the list support all formats and sample rates for
+ // Appends a list of available input devices to |device_names|,
+ // which must initially be empty. It is not guaranteed that all the
+ // devices in the list support all formats and sample rates for
// recording.
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
+ // Appends a list of available output devices to |device_names|,
+ // which must initially be empty.
+ virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
+
// Factory for all the supported stream formats. |params| defines parameters
// of the audio stream to be created.
//
@@ -71,6 +76,14 @@ class MEDIA_EXPORT AudioManager {
// or three buffers are created, one will be locked for playback and one will
// be ready to be filled in the call to AudioSourceCallback::OnMoreData().
//
+ // To create a stream for the default output device, pass an empty string
+ // for |device_id|, otherwise the specified audio device will be opened.
+ //
+ // The |input_device_id| is used for low-latency unified streams
+ // (input+output) only and then only if the audio parameters specify a >0
+ // input channel count. In other cases this id is ignored and should be
+ // empty.
+ //
// Returns NULL if the combination of the parameters is not supported, or if
// we have reached some other platform specific limit.
//
@@ -82,14 +95,18 @@ class MEDIA_EXPORT AudioManager {
//
// Do not free the returned AudioOutputStream. It is owned by AudioManager.
virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates new audio output proxy. A proxy implements
// AudioOutputStream interface, but unlike regular output stream
// created with MakeAudioOutputStream() it opens device only when a
// sound is actually playing.
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Factory to create audio recording streams.
// |channels| can be 1 or 2.
@@ -130,14 +147,28 @@ class MEDIA_EXPORT AudioManager {
// streams. It is a convenience interface to
// AudioManagerBase::GetPreferredOutputStreamParameters and each AudioManager
// does not need their own implementation to this interface.
+ // TODO(tommi): Remove this method and use GetOutputStreamParameteres instead.
virtual AudioParameters GetDefaultOutputStreamParameters() = 0;
+ // Returns the output hardware audio parameters for a specific output device.
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) = 0;
+
// Returns the input hardware audio parameters of the specific device
// for opening input streams. Each AudioManager needs to implement their own
// version of this interface.
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) = 0;
+ // Returns the device id of an output device that belongs to the same hardware
+ // as the specified input device.
+ // If the hardware has only an input device (e.g. a webcam), the return value
+ // will be empty (which the caller can then interpret to be the default output
+ // device). Implementations that don't yet support this feature, must return
+ // an empty string.
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) = 0;
+
protected:
AudioManager();
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index db77f004e38..5b1f4b3690a 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -34,19 +34,23 @@ static const int kMaxInputChannels = 2;
const char AudioManagerBase::kDefaultDeviceName[] = "Default";
const char AudioManagerBase::kDefaultDeviceId[] = "default";
+const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
DispatcherParams(const AudioParameters& input,
const AudioParameters& output,
- const std::string& device_id)
+ const std::string& output_device_id,
+ const std::string& input_device_id)
: input_params(input),
output_params(output),
- input_device_id(device_id) {}
+ input_device_id(input_device_id),
+ output_device_id(output_device_id) {}
~DispatcherParams() {}
const AudioParameters input_params;
const AudioParameters output_params;
const std::string input_device_id;
+ const std::string output_device_id;
scoped_refptr<AudioOutputDispatcher> dispatcher;
private:
@@ -65,6 +69,7 @@ class AudioManagerBase::CompareByParams {
// of the existing dispatcher are the same as the request dispatcher.
return (dispatcher_->input_params == dispatcher_in->input_params &&
dispatcher_->output_params == dispatcher_in->output_params &&
+ dispatcher_->output_device_id == dispatcher_in->output_device_id &&
(!dispatcher_->input_params.input_channels() ||
dispatcher_->input_device_id == dispatcher_in->input_device_id));
}
@@ -134,6 +139,7 @@ scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
// TODO(miu): Fix ~50 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
@@ -159,10 +165,12 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
AudioOutputStream* stream;
switch (params.format()) {
case AudioParameters::AUDIO_PCM_LINEAR:
+ DCHECK(device_id.empty())
+ << "AUDIO_PCM_LINEAR supports only the default device.";
stream = MakeLinearOutputStream(params);
break;
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
- stream = MakeLowLatencyOutputStream(params, input_device_id);
+ stream = MakeLowLatencyOutputStream(params, device_id, input_device_id);
break;
case AudioParameters::AUDIO_FAKE:
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
@@ -180,7 +188,8 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
}
AudioInputStream* AudioManagerBase::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
+ const AudioParameters& params,
+ const std::string& device_id) {
// TODO(miu): Fix ~20 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
// DCHECK(message_loop_->BelongsToCurrentThread());
@@ -222,19 +231,26 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
}
AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
- const AudioParameters& params, const std::string& input_device_id) {
-#if defined(OS_IOS)
- // IOS implements audio input only.
- NOTIMPLEMENTED();
- return NULL;
-#else
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK(message_loop_->BelongsToCurrentThread());
+ // If the caller supplied an empty device id to select the default device,
+ // we fetch the actual device id of the default device so that the lookup
+ // will find the correct device regardless of whether it was opened as
+ // "default" or via the specific id.
+ // NOTE: Implementations that don't yet support opening non-default output
+ // devices may return an empty string from GetDefaultOutputDeviceID().
+ std::string output_device_id = device_id.empty() ?
+ GetDefaultOutputDeviceID() : device_id;
+
// If we're not using AudioOutputResampler our output parameters are the same
// as our input parameters.
AudioParameters output_params = params;
if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
- output_params = GetPreferredOutputStreamParameters(params);
+ output_params =
+ GetPreferredOutputStreamParameters(output_device_id, params);
// Ensure we only pass on valid output parameters.
if (!output_params.IsValid()) {
@@ -257,7 +273,8 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
}
DispatcherParams* dispatcher_params =
- new DispatcherParams(params, output_params, input_device_id);
+ new DispatcherParams(params, output_params, output_device_id,
+ input_device_id);
AudioOutputDispatchers::iterator it =
std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
@@ -272,23 +289,28 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
scoped_refptr<AudioOutputDispatcher> dispatcher;
if (output_params.format() != AudioParameters::AUDIO_FAKE) {
dispatcher = new AudioOutputResampler(this, params, output_params,
- input_device_id, kCloseDelay);
+ output_device_id, input_device_id,
+ kCloseDelay);
} else {
dispatcher = new AudioOutputDispatcherImpl(this, output_params,
+ output_device_id,
input_device_id, kCloseDelay);
}
dispatcher_params->dispatcher = dispatcher;
output_dispatchers_.push_back(dispatcher_params);
return new AudioOutputProxy(dispatcher.get());
-#endif // defined(OS_IOS)
}
void AudioManagerBase::ShowAudioInputSettings() {
}
void AudioManagerBase::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+ AudioDeviceNames* device_names) {
+}
+
+void AudioManagerBase::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
}
void AudioManagerBase::ReleaseOutputStream(AudioOutputStream* stream) {
@@ -333,10 +355,6 @@ void AudioManagerBase::Shutdown() {
}
void AudioManagerBase::ShutdownOnAudioThread() {
-// IOS implements audio input only.
-#if defined(OS_IOS)
- return;
-#else
// This should always be running on the audio thread, but since we've cleared
// the audio_thread_ member pointer when we get here, we can't verify exactly
// what thread we're running on. The method is not public though and only
@@ -357,7 +375,6 @@ void AudioManagerBase::ShutdownOnAudioThread() {
}
output_dispatchers_.clear();
-#endif // defined(OS_IOS)
}
void AudioManagerBase::AddOutputDeviceChangeListener(
@@ -379,7 +396,14 @@ void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
}
AudioParameters AudioManagerBase::GetDefaultOutputStreamParameters() {
- return GetPreferredOutputStreamParameters(AudioParameters());
+ return GetPreferredOutputStreamParameters(GetDefaultOutputDeviceID(),
+ AudioParameters());
+}
+
+AudioParameters AudioManagerBase::GetOutputStreamParameters(
+ const std::string& device_id) {
+ return GetPreferredOutputStreamParameters(device_id,
+ AudioParameters());
}
AudioParameters AudioManagerBase::GetInputStreamParameters(
@@ -388,4 +412,15 @@ AudioParameters AudioManagerBase::GetInputStreamParameters(
return AudioParameters();
}
+std::string AudioManagerBase::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ NOTIMPLEMENTED();
+ return "";
+}
+
+std::string AudioManagerBase::GetDefaultOutputDeviceID() {
+ NOTIMPLEMENTED();
+ return "";
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
index 8b34d9fcf94..cdf7d3a76ae 100644
--- a/chromium/media/audio/audio_manager_base.h
+++ b/chromium/media/audio/audio_manager_base.h
@@ -32,11 +32,24 @@ class AudioOutputDispatcher;
// AudioManagerBase provides AudioManager functions common for all platforms.
class MEDIA_EXPORT AudioManagerBase : public AudioManager {
public:
+ // TODO(sergeyu): The constants below belong to AudioManager interface, not
+ // to the base implementation.
+
// Name of the generic "default" device.
static const char kDefaultDeviceName[];
// Unique Id of the generic "default" device.
static const char kDefaultDeviceId[];
+ // Input device ID used to capture the default system playback stream. When
+ // this device ID is passed to MakeAudioInputStream() the returned
+ // AudioInputStream will be capturing audio currently being played on the
+ // default playback device. At the moment this feature is supported only on
+ // some platforms. AudioInputStream::Intialize() will return an error on
+ // platforms that don't support it. GetInputStreamParameters() must be used
+ // to get the parameters of the loopback device before creating a loopback
+ // stream, otherwise stream initialization may fail.
+ static const char kLoopbackInputDeviceId[];
+
virtual ~AudioManagerBase();
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
@@ -47,10 +60,14 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual void ShowAudioInputSettings() OVERRIDE;
virtual void GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) OVERRIDE;
+ AudioDeviceNames* device_names) OVERRIDE;
+
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
@@ -58,6 +75,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
// Called internally by the audio stream when it has been closed.
@@ -72,7 +90,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
// |input_device_id| is used by unified IO to open the correct input device.
virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) = 0;
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
// name is also from |AUDIO_PCM_LINEAR|.
@@ -90,9 +110,15 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
AudioDeviceListener* listener) OVERRIDE;
virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
+
protected:
AudioManagerBase();
@@ -115,9 +141,16 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// will decide if they should return the values from |input_params| or the
// default hardware values. If the |input_params| is invalid, it will return
// the default hardware audio parameters.
+ // If |output_device_id| is empty, the implementation must treat that as
+ // a request for the default output device.
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) = 0;
+ // Returns the ID of the default audio output device.
+ // Implementations that don't yet support this should return an empty string.
+ virtual std::string GetDefaultOutputDeviceID();
+
// Get number of input or output streams.
int input_stream_count() { return num_input_streams_; }
int output_stream_count() { return num_output_streams_; }
diff --git a/chromium/media/audio/audio_input_device_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index dc211a48a93..4747c2e2996 100644
--- a/chromium/media/audio/audio_input_device_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,20 +9,28 @@
#include "media/audio/audio_manager_base.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_LINUX)
+#include "media/audio/linux/audio_manager_linux.h"
+#endif // defined(OS_LINUX)
+
#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/wavein_input_win.h"
#endif
+#if defined(USE_PULSEAUDIO)
+#include "media/audio/pulse/audio_manager_pulse.h"
+#endif // defined(USE_PULSEAUDIO)
+
namespace media {
// Test fixture which allows us to override the default enumeration API on
// Windows.
-class AudioInputDeviceTest
+class AudioManagerTest
: public ::testing::Test {
protected:
- AudioInputDeviceTest()
+ AudioManagerTest()
: audio_manager_(AudioManager::Create())
#if defined(OS_WIN)
, com_init_(base::win::ScopedCOMInitializer::kMTA)
@@ -64,6 +72,7 @@ class AudioInputDeviceTest
// Helper method which verifies that the device list starts with a valid
// default record followed by non-default device names.
static void CheckDeviceNames(const AudioDeviceNames& device_names) {
+ VLOG(2) << "Got " << device_names.size() << " audio devices.";
if (!device_names.empty()) {
AudioDeviceNames::const_iterator it = device_names.begin();
@@ -78,6 +87,8 @@ class AudioInputDeviceTest
while (it != device_names.end()) {
EXPECT_FALSE(it->device_name.empty());
EXPECT_FALSE(it->unique_id.empty());
+ VLOG(2) << "Device ID(" << it->unique_id
+ << "), label: " << it->device_name;
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
it->device_name);
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
@@ -92,10 +103,14 @@ class AudioInputDeviceTest
}
}
- bool CanRunAudioTest() {
+ bool CanRunInputTest() {
return audio_manager_->HasAudioInputDevices();
}
+ bool CanRunOutputTest() {
+ return audio_manager_->HasAudioOutputDevices();
+ }
+
scoped_ptr<AudioManager> audio_manager_;
#if defined(OS_WIN)
@@ -105,8 +120,8 @@ class AudioInputDeviceTest
};
// Test that devices can be enumerated.
-TEST_F(AudioInputDeviceTest, EnumerateDevices) {
- if (!CanRunAudioTest())
+TEST_F(AudioManagerTest, EnumerateInputDevices) {
+ if (!CanRunInputTest())
return;
AudioDeviceNames device_names;
@@ -114,6 +129,16 @@ TEST_F(AudioInputDeviceTest, EnumerateDevices) {
CheckDeviceNames(device_names);
}
+// Test that devices can be enumerated.
+TEST_F(AudioManagerTest, EnumerateOutputDevices) {
+ if (!CanRunOutputTest())
+ return;
+
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
// Run additional tests for Windows since enumeration can be done using
// two different APIs. MMDevice is default for Vista and higher and Wave
// is default for XP and lower.
@@ -121,8 +146,8 @@ TEST_F(AudioInputDeviceTest, EnumerateDevices) {
// Override default enumeration API and force usage of Windows MMDevice.
// This test will only run on Windows Vista and higher.
-TEST_F(AudioInputDeviceTest, EnumerateDevicesWinMMDevice) {
- if (!CanRunAudioTest())
+TEST_F(AudioManagerTest, EnumerateInputDevicesWinMMDevice) {
+ if (!CanRunInputTest())
return;
AudioDeviceNames device_names;
@@ -135,10 +160,24 @@ TEST_F(AudioInputDeviceTest, EnumerateDevicesWinMMDevice) {
CheckDeviceNames(device_names);
}
+TEST_F(AudioManagerTest, EnumerateOutputDevicesWinMMDevice) {
+ if (!CanRunOutputTest())
+ return;
+
+ AudioDeviceNames device_names;
+ if (!SetMMDeviceEnumeration()) {
+ // Usage of MMDevice will fail on XP and lower.
+ LOG(WARNING) << "MM device enumeration is not supported.";
+ return;
+ }
+ audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
// Override default enumeration API and force usage of Windows Wave.
// This test will run on Windows XP, Windows Vista and Windows 7.
-TEST_F(AudioInputDeviceTest, EnumerateDevicesWinWave) {
- if (!CanRunAudioTest())
+TEST_F(AudioManagerTest, EnumerateInputDevicesWinWave) {
+ if (!CanRunInputTest())
return;
AudioDeviceNames device_names;
@@ -147,8 +186,18 @@ TEST_F(AudioInputDeviceTest, EnumerateDevicesWinWave) {
CheckDeviceNames(device_names);
}
-TEST_F(AudioInputDeviceTest, WinXPDeviceIdUnchanged) {
- if (!CanRunAudioTest())
+TEST_F(AudioManagerTest, EnumerateOutputDevicesWinWave) {
+ if (!CanRunOutputTest())
+ return;
+
+ AudioDeviceNames device_names;
+ SetWaveEnumeration();
+ audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
+TEST_F(AudioManagerTest, WinXPDeviceIdUnchanged) {
+ if (!CanRunInputTest())
return;
AudioDeviceNames xp_device_names;
@@ -164,8 +213,8 @@ TEST_F(AudioInputDeviceTest, WinXPDeviceIdUnchanged) {
}
}
-TEST_F(AudioInputDeviceTest, ConvertToWinXPDeviceId) {
- if (!CanRunAudioTest())
+TEST_F(AudioManagerTest, ConvertToWinXPInputDeviceId) {
+ if (!CanRunInputTest())
return;
if (!SetMMDeviceEnumeration()) {
@@ -194,6 +243,103 @@ TEST_F(AudioInputDeviceTest, ConvertToWinXPDeviceId) {
}
}
-#endif
+#endif // defined(OS_WIN)
+
+#if defined(USE_PULSEAUDIO)
+// On Linux, there are two implementations available and both can
+// sometimes be tested on a single system. These tests specifically
+// test Pulseaudio.
+
+TEST_F(AudioManagerTest, EnumerateInputDevicesPulseaudio) {
+ if (!CanRunInputTest())
+ return;
+
+ audio_manager_.reset(AudioManagerPulse::Create());
+ if (audio_manager_.get()) {
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+ } else {
+ LOG(WARNING) << "No pulseaudio on this system.";
+ }
+}
+
+TEST_F(AudioManagerTest, EnumerateOutputDevicesPulseaudio) {
+ if (!CanRunOutputTest())
+ return;
+
+ audio_manager_.reset(AudioManagerPulse::Create());
+ if (audio_manager_.get()) {
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+ } else {
+ LOG(WARNING) << "No pulseaudio on this system.";
+ }
+}
+#endif // defined(USE_PULSEAUDIO)
+
+#if defined(USE_ALSA)
+// On Linux, there are two implementations available and both can
+// sometimes be tested on a single system. These tests specifically
+// test Alsa.
+
+TEST_F(AudioManagerTest, EnumerateInputDevicesAlsa) {
+ if (!CanRunInputTest())
+ return;
+
+ VLOG(2) << "Testing AudioManagerLinux.";
+ audio_manager_.reset(new AudioManagerLinux());
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
+TEST_F(AudioManagerTest, EnumerateOutputDevicesAlsa) {
+ if (!CanRunOutputTest())
+ return;
+
+ VLOG(2) << "Testing AudioManagerLinux.";
+ audio_manager_.reset(new AudioManagerLinux());
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+#endif // defined(USE_ALSA)
+
+TEST_F(AudioManagerTest, GetDefaultOutputStreamParameters) {
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ if (!CanRunInputTest())
+ return;
+
+ AudioParameters params = audio_manager_->GetDefaultOutputStreamParameters();
+ EXPECT_TRUE(params.IsValid());
+#endif // defined(OS_WIN) || defined(OS_MACOSX)
+}
+
+TEST_F(AudioManagerTest, GetAssociatedOutputDeviceID) {
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ if (!CanRunInputTest() || !CanRunOutputTest())
+ return;
+
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ bool found_an_associated_device = false;
+ for (AudioDeviceNames::iterator it = device_names.begin();
+ it != device_names.end();
+ ++it) {
+ EXPECT_FALSE(it->unique_id.empty());
+ EXPECT_FALSE(it->device_name.empty());
+ std::string output_device_id(
+ audio_manager_->GetAssociatedOutputDeviceID(it->unique_id));
+ if (!output_device_id.empty()) {
+ VLOG(2) << it->unique_id << " matches with " << output_device_id;
+ found_an_associated_device = true;
+ }
+ }
+
+ EXPECT_TRUE(found_an_associated_device);
+#endif // defined(OS_WIN) || defined(OS_MACOSX)
+}
} // namespace media
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index f7f4cf8240b..649612cd4f6 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -20,26 +20,31 @@ using base::TimeDelta;
namespace media {
+#if defined(AUDIO_POWER_MONITORING)
// Time constant for AudioPowerMonitor. See AudioPowerMonitor ctor comments for
// semantics. This value was arbitrarily chosen, but seems to work well.
static const int kPowerMeasurementTimeConstantMillis = 10;
// Desired frequency of calls to EventHandler::OnPowerMeasured() for reporting
// power levels in the audio signal.
-static const int kPowerMeasurementsPerSecond = 30;
+static const int kPowerMeasurementsPerSecond = 4;
+#endif
// Polling-related constants.
const int AudioOutputController::kPollNumAttempts = 3;
const int AudioOutputController::kPollPauseInMilliseconds = 3;
-AudioOutputController::AudioOutputController(AudioManager* audio_manager,
- EventHandler* handler,
- const AudioParameters& params,
- const std::string& input_device_id,
- SyncReader* sync_reader)
+AudioOutputController::AudioOutputController(
+ AudioManager* audio_manager,
+ EventHandler* handler,
+ const AudioParameters& params,
+ const std::string& output_device_id,
+ const std::string& input_device_id,
+ SyncReader* sync_reader)
: audio_manager_(audio_manager),
params_(params),
handler_(handler),
+ output_device_id_(output_device_id),
input_device_id_(input_device_id),
stream_(NULL),
diverting_to_stream_(NULL),
@@ -48,10 +53,12 @@ AudioOutputController::AudioOutputController(AudioManager* audio_manager,
num_allowed_io_(0),
sync_reader_(sync_reader),
message_loop_(audio_manager->GetMessageLoop()),
- number_polling_attempts_left_(0),
+#if defined(AUDIO_POWER_MONITORING)
power_monitor_(
params.sample_rate(),
- TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMillis)) {
+ TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMillis)),
+#endif
+ number_polling_attempts_left_(0) {
DCHECK(audio_manager);
DCHECK(handler_);
DCHECK(sync_reader_);
@@ -67,6 +74,7 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
AudioManager* audio_manager,
EventHandler* event_handler,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
SyncReader* sync_reader) {
DCHECK(audio_manager);
@@ -76,7 +84,8 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
return NULL;
scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, input_device_id, sync_reader));
+ audio_manager, event_handler, params, output_device_id, input_device_id,
+ sync_reader));
controller->message_loop_->PostTask(FROM_HERE, base::Bind(
&AudioOutputController::DoCreate, controller, false));
return controller;
@@ -114,8 +123,10 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
DoStopCloseAndClearStream(); // Calls RemoveOutputDeviceChangeListener().
DCHECK_EQ(kEmpty, state_);
- stream_ = diverting_to_stream_ ? diverting_to_stream_ :
- audio_manager_->MakeAudioOutputStreamProxy(params_, input_device_id_);
+ stream_ = diverting_to_stream_ ?
+ diverting_to_stream_ :
+ audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_,
+ input_device_id_);
if (!stream_) {
state_ = kError;
handler_->OnError();
@@ -158,6 +169,7 @@ void AudioOutputController::DoPlay() {
state_ = kPlaying;
+#if defined(AUDIO_POWER_MONITORING)
power_monitor_.Reset();
power_poll_callback_.Reset(
base::Bind(&AudioOutputController::ReportPowerMeasurementPeriodically,
@@ -165,6 +177,7 @@ void AudioOutputController::DoPlay() {
// Run the callback to send an initial notification that we're starting in
// silence, and to schedule periodic callbacks.
power_poll_callback_.callback().Run();
+#endif
// We start the AudioOutputStream lazily.
AllowEntryToOnMoreIOData();
@@ -173,6 +186,7 @@ void AudioOutputController::DoPlay() {
handler_->OnPlaying();
}
+#if defined(AUDIO_POWER_MONITORING)
void AudioOutputController::ReportPowerMeasurementPeriodically() {
DCHECK(message_loop_->BelongsToCurrentThread());
const std::pair<float, bool>& reading =
@@ -182,6 +196,7 @@ void AudioOutputController::ReportPowerMeasurementPeriodically() {
FROM_HERE, power_poll_callback_.callback(),
TimeDelta::FromSeconds(1) / kPowerMeasurementsPerSecond);
}
+#endif
void AudioOutputController::StopStream() {
DCHECK(message_loop_->BelongsToCurrentThread());
@@ -190,7 +205,9 @@ void AudioOutputController::StopStream() {
stream_->Stop();
DisallowEntryToOnMoreIOData();
+#if defined(AUDIO_POWER_MONITORING)
power_poll_callback_.Cancel();
+#endif
state_ = kPaused;
}
@@ -208,8 +225,10 @@ void AudioOutputController::DoPause() {
// Send a special pause mark to the low-latency audio thread.
sync_reader_->UpdatePendingBytes(kPauseMark);
+#if defined(AUDIO_POWER_MONITORING)
// Paused means silence follows.
handler_->OnPowerMeasured(AudioPowerMonitor::zero_power(), false);
+#endif
handler_->OnPaused();
}
@@ -283,7 +302,9 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
sync_reader_->UpdatePendingBytes(
buffers_state.total_bytes() + frames * params_.GetBytesPerFrame());
+#if defined(AUDIO_POWER_MONITORING)
power_monitor_.Scan(*dest, frames);
+#endif
AllowEntryToOnMoreIOData();
return frames;
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index 38a2c03f590..615c6a5e6c6 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -52,6 +52,11 @@
namespace media {
+// Only do power monitoring for non-mobile platforms that need it for the UI.
+#if !defined(OS_ANDROID) && !defined(OS_IOS)
+#define AUDIO_POWER_MONITORING
+#endif
+
class MEDIA_EXPORT AudioOutputController
: public base::RefCountedThreadSafe<AudioOutputController>,
public AudioOutputStream::AudioSourceCallback,
@@ -101,10 +106,14 @@ class MEDIA_EXPORT AudioOutputController
// thread, and if this is successful, the |event_handler| will receive an
// OnCreated() call from the same audio manager thread. |audio_manager| must
// outlive AudioOutputController.
+ // The |output_device_id| can be either empty (default device) or specify a
+ // specific hardware device for audio output. The |input_device_id| is
+ // used only for unified audio when opening up input and output at the same
+ // time (controlled by |params.input_channel_count()|).
static scoped_refptr<AudioOutputController> Create(
AudioManager* audio_manager, EventHandler* event_handler,
- const AudioParameters& params, const std::string& input_device_id,
- SyncReader* sync_reader);
+ const AudioParameters& params, const std::string& output_device_id,
+ const std::string& input_device_id, SyncReader* sync_reader);
// Methods to control playback of the stream.
@@ -166,6 +175,7 @@ class MEDIA_EXPORT AudioOutputController
AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
SyncReader* sync_reader);
@@ -198,8 +208,12 @@ class MEDIA_EXPORT AudioOutputController
const AudioParameters params_;
EventHandler* const handler_;
+ // Specifies the device id of the output device to open or empty for the
+ // default output device.
+ const std::string output_device_id_;
+
// Used by the unified IO to open the correct input device.
- std::string input_device_id_;
+ const std::string input_device_id_;
AudioOutputStream* stream_;
@@ -227,15 +241,17 @@ class MEDIA_EXPORT AudioOutputController
// The message loop of audio manager thread that this object runs on.
const scoped_refptr<base::MessageLoopProxy> message_loop_;
- // When starting stream we wait for data to become available.
- // Number of times left.
- int number_polling_attempts_left_;
-
+#if defined(AUDIO_POWER_MONITORING)
// Scans audio samples from OnMoreIOData() as input to compute power levels.
AudioPowerMonitor power_monitor_;
// Periodic callback to report power levels during playback.
base::CancelableClosure power_poll_callback_;
+#endif
+
+ // When starting stream we wait for data to become available.
+ // Number of times left.
+ int number_polling_attempts_left_;
DISALLOW_COPY_AND_ASSIGN(AudioOutputController);
};
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index 128cc07716f..a7118e17a30 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -29,8 +29,6 @@ static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
static const int kBitsPerSample = 16;
static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
static const int kSamplesPerPacket = kSampleRate / 100;
-static const int kHardwareBufferSize = kSamplesPerPacket *
- ChannelLayoutToChannelCount(kChannelLayout) * kBitsPerSample / 8;
static const double kTestVolume = 0.25;
class MockAudioOutputControllerEventHandler
@@ -122,7 +120,7 @@ class AudioOutputControllerTest : public testing::Test {
controller_ = AudioOutputController::Create(
audio_manager_.get(), &mock_event_handler_, params_, std::string(),
- &mock_sync_reader_);
+ std::string(), &mock_sync_reader_);
if (controller_.get())
controller_->SetVolume(kTestVolume);
@@ -134,8 +132,10 @@ class AudioOutputControllerTest : public testing::Test {
// OnPowerMeasured() calls.
EXPECT_CALL(mock_event_handler_, OnPlaying())
.WillOnce(SignalEvent(&play_event_));
+#if defined(AUDIO_POWER_MONITORING)
EXPECT_CALL(mock_event_handler_, OnPowerMeasured(_, false))
.Times(AtLeast(1));
+#endif
// During playback, the mock pretends to provide audio data rendered and
// sent from the render process.
diff --git a/chromium/media/audio/audio_output_dispatcher.cc b/chromium/media/audio/audio_output_dispatcher.cc
index 06206d7be7f..a151c449f02 100644
--- a/chromium/media/audio/audio_output_dispatcher.cc
+++ b/chromium/media/audio/audio_output_dispatcher.cc
@@ -11,10 +11,12 @@ namespace media {
AudioOutputDispatcher::AudioOutputDispatcher(
AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id)
: audio_manager_(audio_manager),
message_loop_(base::MessageLoop::current()),
params_(params),
+ output_device_id_(output_device_id),
input_device_id_(input_device_id) {
// We expect to be instantiated on the audio thread. Otherwise the
// message_loop_ member will point to the wrong message loop!
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
index a79fd94477f..30266ed6a9a 100644
--- a/chromium/media/audio/audio_output_dispatcher.h
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -38,6 +38,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
public:
AudioOutputDispatcher(AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id);
// Called by AudioOutputProxy to open the stream.
@@ -79,6 +80,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
AudioManager* audio_manager_;
base::MessageLoop* message_loop_;
AudioParameters params_;
+ const std::string output_device_id_;
const std::string input_device_id_;
private:
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 1df8e7ddd5b..bcdcd65146e 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -19,9 +19,11 @@ namespace media {
AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, params, input_device_id),
+ : AudioOutputDispatcher(audio_manager, params, output_device_id,
+ input_device_id),
pause_delay_(base::TimeDelta::FromMicroseconds(
2 * params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
static_cast<float>(params.sample_rate()))),
@@ -168,7 +170,7 @@ void AudioOutputDispatcherImpl::Shutdown() {
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
DCHECK_EQ(base::MessageLoop::current(), message_loop_);
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
- params_, input_device_id_);
+ params_, output_device_id_, input_device_id_);
if (!stream)
return false;
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index 06fe3ebeaf1..b59f835f9b0 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -35,6 +35,7 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
// the audio device is closed.
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay);
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index de95b0661ec..1806ce66131 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -95,10 +95,14 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD0(GetAudioInputDeviceModel, string16());
- MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
- MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
+ MOCK_METHOD3(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
@@ -108,14 +112,15 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
- MOCK_METHOD1(GetPreferredOutputStreamParameters, AudioParameters(
- const AudioParameters& params));
+ MOCK_METHOD2(GetPreferredOutputStreamParameters, AudioParameters(
+ const std::string& device_id, const AudioParameters& params));
};
class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
@@ -161,6 +166,7 @@ class AudioOutputProxyTest : public testing::Test {
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
+ std::string(),
close_delay);
// Necessary to know how long the dispatcher will wait before posting
@@ -186,7 +192,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -203,7 +209,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -229,7 +235,7 @@ class AudioOutputProxyTest : public testing::Test {
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -261,7 +267,7 @@ class AudioOutputProxyTest : public testing::Test {
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -283,7 +289,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(false));
@@ -301,7 +307,7 @@ class AudioOutputProxyTest : public testing::Test {
void CreateAndWait(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -328,7 +334,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -366,7 +372,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -406,7 +412,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -425,7 +431,7 @@ class AudioOutputProxyTest : public testing::Test {
Mock::VerifyAndClear(&stream);
// |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
@@ -467,7 +473,8 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
16000, 16, 1024);
resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, std::string(), close_delay);
+ &manager(), params_, resampler_params_, std::string(), std::string(),
+ close_delay);
}
virtual void OnStart() OVERRIDE {
@@ -568,7 +575,7 @@ TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_.get()); }
// ensure AudioOutputResampler falls back to the high latency path.
TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
.WillRepeatedly(Return(&stream));
@@ -588,7 +595,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
MockAudioOutputStream failed_stream(&manager_, params_);
MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(&failed_stream))
.WillRepeatedly(Return(&okay_stream));
@@ -619,7 +626,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
#else
static const int kFallbackCount = 1;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -630,7 +637,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
testing::Property(
&AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
- _))
+ _, _))
.Times(1)
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
@@ -655,7 +662,7 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
#else
static const int kFallbackCount = 2;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -673,7 +680,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream3(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillOnce(Return(&stream3))
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index 6db0e2fb2fe..da424ec1246 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -147,12 +147,13 @@ static AudioParameters SetupFallbackParams(
AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params, input_device_id),
+ : AudioOutputDispatcher(audio_manager, input_params, output_device_id,
+ input_device_id),
close_delay_(close_delay),
output_params_(output_params),
- input_device_id_(input_device_id),
streams_opened_(false) {
DCHECK(input_params.IsValid());
DCHECK(output_params.IsValid());
@@ -172,7 +173,8 @@ void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, input_device_id_, close_delay_);
+ audio_manager_, output_params_, output_device_id_, input_device_id_,
+ close_delay_);
}
bool AudioOutputResampler::OpenStream() {
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index df9e4320b55..f9a75ac38f5 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -40,6 +40,7 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
+ const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay);
@@ -74,9 +75,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
// AudioParameters used to setup the output stream.
AudioParameters output_params_;
- // Device ID to be used by the unified IO to open the correct input device.
- const std::string input_device_id_;
-
// Whether any streams have been opened through |dispatcher_|, if so we can't
// fallback on future OpenStream() failures.
bool streams_opened_;
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index 2817cd2c5a6..bc629a7db00 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -14,6 +14,7 @@ namespace media {
struct MEDIA_EXPORT AudioInputBufferParameters {
double volume;
uint32 size;
+ bool key_pressed;
};
// Use a struct-in-struct approach to ensure that we can calculate the required
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
new file mode 100644
index 00000000000..ff809d0541d
--- /dev/null
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -0,0 +1,107 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/clockless_audio_sink.h"
+
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+// Internal to ClocklessAudioSink. Class is used to call Render() on a separate
+// thread, running as fast as it can read the data.
+class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
+ public:
+ explicit ClocklessAudioSinkThread(const AudioParameters& params,
+ AudioRendererSink::RenderCallback* callback)
+ : callback_(callback),
+ audio_bus_(AudioBus::Create(params)),
+ stop_event_(new base::WaitableEvent(false, false)) {}
+
+ void Start() {
+ stop_event_->Reset();
+ thread_.reset(new base::DelegateSimpleThread(this, "ClocklessAudioSink"));
+ thread_->Start();
+ }
+
+ // Generate a signal to stop calling Render().
+ base::TimeDelta Stop() {
+ stop_event_->Signal();
+ thread_->Join();
+ return playback_time_;
+ }
+
+ private:
+ // Call Render() repeatedly, keeping track of the rendering time.
+ virtual void Run() OVERRIDE {
+ base::TimeTicks start;
+ while (!stop_event_->IsSignaled()) {
+ int frames_received = callback_->Render(audio_bus_.get(), 0);
+ if (frames_received <= 0) {
+ // No data received, so let other threads run to provide data.
+ base::PlatformThread::YieldCurrentThread();
+ } else if (start.is_null()) {
+ // First time we processed some audio, so record the starting time.
+ start = base::TimeTicks::HighResNow();
+ } else {
+ // Keep track of the last time data was rendered.
+ playback_time_ = base::TimeTicks::HighResNow() - start;
+ }
+ }
+ }
+
+ AudioRendererSink::RenderCallback* callback_;
+ scoped_ptr<AudioBus> audio_bus_;
+ scoped_ptr<base::WaitableEvent> stop_event_;
+ scoped_ptr<base::DelegateSimpleThread> thread_;
+ base::TimeDelta playback_time_;
+};
+
+ClocklessAudioSink::ClocklessAudioSink()
+ : initialized_(false),
+ playing_(false) {}
+
+ClocklessAudioSink::~ClocklessAudioSink() {}
+
+void ClocklessAudioSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!initialized_);
+ thread_.reset(new ClocklessAudioSinkThread(params, callback));
+ initialized_ = true;
+}
+
+void ClocklessAudioSink::Start() {
+ DCHECK(!playing_);
+}
+
+void ClocklessAudioSink::Stop() {
+ DCHECK(initialized_);
+
+ if (!playing_)
+ return;
+
+ playback_time_ = thread_->Stop();
+}
+
+void ClocklessAudioSink::Play() {
+ DCHECK(initialized_);
+
+ if (playing_)
+ return;
+
+ playing_ = true;
+ thread_->Start();
+}
+
+void ClocklessAudioSink::Pause() {
+ Stop();
+}
+
+bool ClocklessAudioSink::SetVolume(double volume) {
+ // Audio is always muted.
+ return volume == 0.0;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
new file mode 100644
index 00000000000..9e73b1a8817
--- /dev/null
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
+#define MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+class AudioBus;
+class ClocklessAudioSinkThread;
+
+// Implementation of an AudioRendererSink that consumes the audio as fast as
+// possible. This class does not support multiple Play()/Pause() events.
+class MEDIA_EXPORT ClocklessAudioSink
+ : NON_EXPORTED_BASE(public AudioRendererSink) {
+ public:
+ ClocklessAudioSink();
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+
+ // Returns the time taken to consume all the audio.
+ base::TimeDelta render_time() { return playback_time_; }
+
+ protected:
+ virtual ~ClocklessAudioSink();
+
+ private:
+ scoped_ptr<ClocklessAudioSinkThread> thread_;
+ bool initialized_;
+ bool playing_;
+
+ // Time taken in last set of Render() calls.
+ base::TimeDelta playback_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClocklessAudioSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index 165d642922c..14a0c4e86ac 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -16,14 +16,21 @@
namespace media {
+static void AddDefaultDevice(AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+
+ // Cras will route audio from a proper physical device automatically.
+ device_names->push_back(
+ AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
+ AudioManagerBase::kDefaultDeviceId));
+}
+
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;
// Default sample rate for input and output streams.
static const int kDefaultSampleRate = 48000;
-const char AudioManagerCras::kLoopbackDeviceId[] = "loopback";
-
bool AudioManagerCras::HasAudioOutputDevices() {
return true;
}
@@ -45,10 +52,13 @@ void AudioManagerCras::ShowAudioInputSettings() {
}
void AudioManagerCras::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- GetCrasAudioInputDevices(device_names);
- return;
+ AudioDeviceNames* device_names) {
+ AddDefaultDevice(device_names);
+}
+
+void AudioManagerCras::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ AddDefaultDevice(device_names);
}
AudioParameters AudioManagerCras::GetInputStreamParameters(
@@ -61,14 +71,6 @@ AudioParameters AudioManagerCras::GetInputStreamParameters(
kDefaultSampleRate, 16, kDefaultInputBufferSize);
}
-void AudioManagerCras::GetCrasAudioInputDevices(
- media::AudioDeviceNames* device_names) {
- // Cras will route audio from a proper physical device automatically.
- device_names->push_back(
- AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
-}
-
AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
@@ -76,7 +78,10 @@ AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
}
AudioOutputStream* AudioManagerCras::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(dgreid): Open the correct input device for unified IO.
return MakeOutputStream(params);
@@ -95,7 +100,10 @@ AudioInputStream* AudioManagerCras::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index fdc5b02688a..3b0ef530e07 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -15,18 +15,16 @@ namespace media {
class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
public:
- // Unique ID of the "loopback" input device. This device captures post-mix,
- // pre-DSP system audio.
- static const char kLoopbackDeviceId[];
-
AudioManagerCras();
// AudioManager implementation.
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
@@ -35,6 +33,7 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -45,12 +44,10 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
virtual ~AudioManagerCras();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
- // Gets a list of available cras input devices.
- void GetCrasAudioInputDevices(media::AudioDeviceNames* device_names);
-
// Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
AudioOutputStream* MakeOutputStream(const AudioParameters& params);
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index a82fe283f7a..fd574dc86e5 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -26,9 +26,8 @@ CrasInputStream::CrasInputStream(const AudioParameters& params,
params_(params),
started_(false),
stream_id_(0),
- stream_direction_(device_id == AudioManagerCras::kLoopbackDeviceId
- ? CRAS_STREAM_POST_MIX_PRE_DSP
- : CRAS_STREAM_INPUT) {
+ stream_direction_(device_id == AudioManagerBase::kLoopbackInputDeviceId ?
+ CRAS_STREAM_POST_MIX_PRE_DSP : CRAS_STREAM_INPUT) {
DCHECK(audio_manager_);
}
@@ -114,7 +113,6 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
StartAgc();
callback_ = callback;
- LOG(ERROR) << "Input Start";
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index c1c3ee9228f..a7741864b31 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -168,7 +168,6 @@ void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
if (is_playing_)
return;
- LOG(ERROR) << "Unified Start";
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
cras_audio_format* audio_format = cras_audio_format_create(
diff --git a/chromium/media/audio/cross_process_notification.cc b/chromium/media/audio/cross_process_notification.cc
deleted file mode 100644
index 1806f777da3..00000000000
--- a/chromium/media/audio/cross_process_notification.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-
-CrossProcessNotification::CrossProcessNotification() {}
-
-CrossProcessNotification::WaitForMultiple::WaitForMultiple(
- const Notifications* notifications) {
- Reset(notifications);
-}
-
-int CrossProcessNotification::WaitForMultiple::Wait() {
- DCHECK(CalledOnValidThread());
- int ret = WaitMultiple(*notifications_, wait_offset_);
- wait_offset_ = (ret + 1) % notifications_->size();
- return ret;
-}
-
-void CrossProcessNotification::WaitForMultiple::Reset(
- const Notifications* notifications) {
- DCHECK(CalledOnValidThread());
- wait_offset_ = 0;
- notifications_ = notifications;
- DCHECK(!notifications_->empty());
-}
diff --git a/chromium/media/audio/cross_process_notification.h b/chromium/media/audio/cross_process_notification.h
deleted file mode 100644
index 16f2fc07fcf..00000000000
--- a/chromium/media/audio/cross_process_notification.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
-#define MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/process/process.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/base/media_export.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#else
-#include "base/file_descriptor_posix.h"
-#include "base/sync_socket.h"
-#endif
-
-// A mechanism to synchronize access to a shared resource between two parties
-// when the usage pattern resembles that of two players playing a game of chess.
-// Each end has an instance of CrossProcessNotification and calls Signal() when
-// it has finished using the shared resource.
-// Before accessing the resource, it must call Wait() in order to know when the
-// other end has called Signal().
-//
-// Here's some pseudo code for how this class can be used:
-//
-// This method is used by both processes as it's a general way to use the
-// shared resource and then grant the privilege to the other process:
-//
-// void WriteToSharedMemory(CrossProcessNotification* notification,
-// SharedMemory* mem,
-// const char my_char) {
-// notification->Wait(); // Wait for the other process to yield access.
-// reinterpret_cast<char*>(mem->memory())[0] = my_char;
-// notification->Signal(); // Grant the other process access.
-// }
-//
-// Process A:
-//
-// class A {
-// public:
-// void Initialize(base::ProcessHandle process_b) {
-// mem_.CreateNamed("foo", false, 1024);
-//
-// CrossProcessNotification other;
-// CHECK(CrossProcessNotification::InitializePair(&notification_, &other));
-// CrossProcessNotification::IPCHandle handle_1, handle_2;
-// CHECK(other.ShareToProcess(process_b, &handle_1, &handle_2));
-// // This could be implemented by using some IPC mechanism
-// // such as MessageLoop.
-// SendToProcessB(mem_, handle_1, handle_2);
-// // Allow process B the first chance to write to the memory:
-// notification_.Signal();
-// // Once B is done, we'll write 'A' to the shared memory.
-// WriteToSharedMemory(&notification_, &mem_, 'A');
-// }
-//
-// CrossProcessNotification notification_;
-// SharedMemory mem_;
-// };
-//
-// Process B:
-//
-// class B {
-// public:
-// // Called when we receive the IPC message from A.
-// void Initialize(SharedMemoryHandle mem,
-// CrossProcessNotification::IPCHandle handle_1,
-// CrossProcessNotification::IPCHandle handle_2) {
-// mem_.reset(new SharedMemory(mem, false));
-// notification_.reset(new CrossProcessNotification(handle_1, handle_2));
-// WriteToSharedMemory(&notification_, &mem_, 'B');
-// }
-//
-// CrossProcessNotification notification_;
-// scoped_ptr<SharedMemory> mem_;
-// };
-//
-class MEDIA_EXPORT CrossProcessNotification {
- public:
-#if defined(OS_WIN)
- typedef HANDLE IPCHandle;
-#else
- typedef base::FileDescriptor IPCHandle;
-#endif
-
- typedef std::vector<CrossProcessNotification*> Notifications;
-
- // Default ctor. Initializes a NULL notification. User must call
- // InitializePair() to initialize the instance along with a connected one.
- CrossProcessNotification();
-
- // Ctor for the user that does not call InitializePair but instead receives
- // handles from the one that did. These handles come from a call to
- // ShareToProcess.
- CrossProcessNotification(IPCHandle handle_1, IPCHandle handle_2);
- ~CrossProcessNotification();
-
- // Raises a signal that the shared resource now can be accessed by the other
- // party.
- // NOTE: Calling Signal() more than once without calling Wait() in between
- // is not a supported scenario and will result in undefined behavior (and
- // different depending on platform).
- void Signal();
-
- // Waits for the other party to finish using the shared resource.
- // NOTE: As with Signal(), you must not call Wait() more than once without
- // calling Signal() in between.
- void Wait();
-
- bool IsValid() const;
-
- // Copies the internal handles to the output parameters, |handle_1| and
- // |handle_2|. The operation can fail, so the caller must be prepared to
- // handle that case.
- bool ShareToProcess(base::ProcessHandle process, IPCHandle* handle_1,
- IPCHandle* handle_2);
-
- // Initializes a pair of CrossProcessNotification instances. Note that this
- // can fail (e.g. due to EMFILE on Linux).
- static bool InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b);
-
- // Use an instance of this class when you have to repeatedly wait for multiple
- // notifications on the same thread. The class will store information about
- // which notification was last signaled and try to distribute the signals so
- // that all notifications get a chance to be processed in times of high load
- // and a busy one won't starve the others.
- // TODO(tommi): Support a way to abort the wait.
- class MEDIA_EXPORT WaitForMultiple :
- public NON_EXPORTED_BASE(base::NonThreadSafe) {
- public:
- // Caller must make sure that the lifetime of the array is greater than
- // that of the WaitForMultiple instance.
- explicit WaitForMultiple(const Notifications* notifications);
-
- // Waits for any of the notifications to be signaled. Returns the 0 based
- // index of a signaled notification.
- int Wait();
-
- // Call when the array changes. This should be called on the same thread
- // as Wait() is called on and the array must never change while a Wait()
- // is in progress.
- void Reset(const Notifications* notifications);
-
- private:
- const Notifications* notifications_;
- size_t wait_offset_;
- };
-
- private:
- // Only called by the WaitForMultiple class. See documentation
- // for WaitForMultiple and comments inside WaitMultiple for details.
- static int WaitMultiple(const Notifications& notifications,
- size_t wait_offset);
-
-#if defined(OS_WIN)
- base::win::ScopedHandle mine_;
- base::win::ScopedHandle other_;
-#else
- typedef base::CancelableSyncSocket SocketClass;
- SocketClass socket_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(CrossProcessNotification);
-};
-
-#endif // MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
diff --git a/chromium/media/audio/cross_process_notification_posix.cc b/chromium/media/audio/cross_process_notification_posix.cc
deleted file mode 100644
index d5683495ef9..00000000000
--- a/chromium/media/audio/cross_process_notification_posix.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include <errno.h>
-#include <sys/poll.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/file_descriptor_posix.h"
-
-CrossProcessNotification::~CrossProcessNotification() {}
-
-CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
- IPCHandle handle_2)
- : socket_(handle_1.fd) {
- DCHECK_NE(handle_1.fd, -1);
- DCHECK_EQ(handle_2.fd, -1);
- DCHECK(IsValid());
-}
-
-void CrossProcessNotification::Signal() {
- DCHECK(IsValid());
- char signal = 1;
- size_t bytes = socket_.Send(&signal, sizeof(signal));
- DCHECK_EQ(bytes, 1U) << "errno: " << errno;
-}
-
-void CrossProcessNotification::Wait() {
- DCHECK(IsValid());
- char signal = 0;
- size_t bytes = socket_.Receive(&signal, sizeof(signal));
- DCHECK_EQ(bytes, 1U) << "errno: " << errno;
- DCHECK_EQ(signal, 1);
-}
-
-bool CrossProcessNotification::IsValid() const {
- return socket_.handle() != SocketClass::kInvalidHandle;
-}
-
-bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
- IPCHandle* handle_1,
- IPCHandle* handle_2) {
- DCHECK(IsValid());
- handle_1->fd = socket_.handle();
- handle_1->auto_close = false;
- handle_2->fd = -1;
- return true;
-}
-
-// static
-bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b) {
- DCHECK(!a->IsValid());
- DCHECK(!b->IsValid());
-
- bool ok = SocketClass::CreatePair(&a->socket_, &b->socket_);
-
- DLOG_IF(WARNING, !ok) << "failed to create socket: " << errno;
- DCHECK(!ok || a->IsValid());
- DCHECK(!ok || b->IsValid());
- return ok;
-}
-
-// static
-int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
- size_t wait_offset) {
- DCHECK_LT(wait_offset, notifications.size());
-
- for (size_t i = 0; i < notifications.size(); ++i) {
- DCHECK(notifications[i]->IsValid());
- }
-
- // Below, we always check the |revents| of the first socket in the array
- // and return the index of that socket if set. This can cause sockets
- // that come later in the array to starve when the first sockets are
- // very busy. So to avoid the starving problem, we use the |wait_offset|
- // variable to split up the array so that the last socket to be signaled
- // becomes the last socket in the array and all the other sockets will have
- // priority the next time WaitMultiple is called.
- scoped_ptr<struct pollfd[]> sockets(new struct pollfd[notifications.size()]);
- memset(&sockets[0], 0, notifications.size() * sizeof(sockets[0]));
- size_t index = 0;
- for (size_t i = wait_offset; i < notifications.size(); ++i) {
- struct pollfd& fd = sockets[index++];
- fd.events = POLLIN;
- fd.fd = notifications[i]->socket_.handle();
- }
-
- for (size_t i = 0; i < wait_offset; ++i) {
- struct pollfd& fd = sockets[index++];
- fd.events = POLLIN;
- fd.fd = notifications[i]->socket_.handle();
- }
- DCHECK_EQ(index, notifications.size());
-
- int err = poll(&sockets[0], notifications.size(), -1);
- if (err != -1) {
- for (size_t i = 0; i < notifications.size(); ++i) {
- if (sockets[i].revents) {
- size_t ret = (i + wait_offset) % notifications.size();
- DCHECK_EQ(sockets[i].fd, notifications[ret]->socket_.handle());
- notifications[ret]->Wait();
- return ret;
- }
- }
- }
- // Either poll() failed or we failed to find a single socket that was
- // signaled. Either way continuing will result in undefined behavior.
- LOG(FATAL) << "poll() failed: " << errno;
- return -1;
-}
diff --git a/chromium/media/audio/cross_process_notification_unittest.cc b/chromium/media/audio/cross_process_notification_unittest.cc
deleted file mode 100644
index a27219496cb..00000000000
--- a/chromium/media/audio/cross_process_notification_unittest.cc
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/memory/shared_memory.h"
-#include "base/process/kill.h"
-#include "base/stl_util.h"
-#include "base/test/multiprocess_test.h"
-#include "base/threading/platform_thread.h"
-#include "media/audio/cross_process_notification.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/multiprocess_func_list.h"
-
-#include <utility> // NOLINT
-
-namespace {
-
-// Initializes (ctor) and deletes (dtor) two vectors of pairs of
-// CrossProcessNotification instances.
-class NotificationsOwner {
- public:
- // Attempts to create up to |number_of_pairs| number of pairs. Call size()
- // after construction to find out how many pairs were actually created.
- explicit NotificationsOwner(size_t number_of_pairs) {
- CreateMultiplePairs(number_of_pairs);
- }
- ~NotificationsOwner() {
- STLDeleteElements(&a_);
- STLDeleteElements(&b_);
- }
-
- size_t size() const {
- DCHECK_EQ(a_.size(), b_.size());
- return a_.size();
- }
-
- const CrossProcessNotification::Notifications& a() { return a_; }
- const CrossProcessNotification::Notifications& b() { return b_; }
-
- private:
- void CreateMultiplePairs(size_t count) {
- a_.resize(count);
- b_.resize(count);
- size_t i = 0;
- for (; i < count; ++i) {
- a_[i] = new CrossProcessNotification();
- b_[i] = new CrossProcessNotification();
- if (!CrossProcessNotification::InitializePair(a_[i], b_[i])) {
- LOG(WARNING) << "InitializePair failed at " << i;
- delete a_[i];
- delete b_[i];
- break;
- }
- }
- a_.resize(i);
- b_.resize(i);
- }
-
- CrossProcessNotification::Notifications a_;
- CrossProcessNotification::Notifications b_;
-};
-
-// A simple thread that we'll run two instances of. Both threads get a pointer
-// to the same |shared_data| and use a CrossProcessNotification to control when
-// each thread can read/write.
-class SingleNotifierWorker : public base::PlatformThread::Delegate {
- public:
- SingleNotifierWorker(size_t* shared_data, size_t repeats,
- CrossProcessNotification* notifier)
- : shared_data_(shared_data), repeats_(repeats),
- notifier_(notifier) {
- }
- virtual ~SingleNotifierWorker() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- for (size_t i = 0; i < repeats_; ++i) {
- notifier_->Wait();
- ++(*shared_data_);
- notifier_->Signal();
- }
- }
-
- private:
- size_t* shared_data_;
- size_t repeats_;
- CrossProcessNotification* notifier_;
- DISALLOW_COPY_AND_ASSIGN(SingleNotifierWorker);
-};
-
-// Similar to SingleNotifierWorker, except each instance of this class will
-// have >1 instances of CrossProcessNotification to Wait/Signal and an equal
-// amount of |shared_data| that the notifiers control access to.
-class MultiNotifierWorker : public base::PlatformThread::Delegate {
- public:
- MultiNotifierWorker(size_t* shared_data, size_t repeats,
- const CrossProcessNotification::Notifications* notifiers)
- : shared_data_(shared_data), repeats_(repeats),
- notifiers_(notifiers) {
- }
- virtual ~MultiNotifierWorker() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- CrossProcessNotification::WaitForMultiple waiter(notifiers_);
- for (size_t i = 0; i < repeats_; ++i) {
- int signaled = waiter.Wait();
- ++shared_data_[signaled];
- (*notifiers_)[signaled]->Signal();
- }
- }
-
- private:
- size_t* shared_data_;
- size_t repeats_;
- const CrossProcessNotification::Notifications* notifiers_;
- DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorker);
-};
-
-// A fixed array of bool flags. Each flag uses 1 bit. Use sizeof(FlagArray)
-// to determine how much memory you need. The number of flags will therefore
-// be sizeof(FlagArray) * 8.
-// We use 'struct' to signify that this structures represents compiler
-// independent structured data. I.e. you must be able to map this class
-// to a piece of shared memory of size sizeof(FlagArray) and be able to
-// use the class. No vtables etc.
-// TODO(tommi): Move this to its own header when we start using it for signaling
-// audio devices. As is, it's just here for perf comparison against the
-// "multiple notifiers" approach.
-struct FlagArray {
- public:
- FlagArray() : flags_() {}
-
- bool is_set(size_t index) const {
- return (flags_[index >> 5] & (1 << (index & 31)));
- }
-
- void set(size_t index) {
- flags_[index >> 5] |= (1U << (static_cast<uint32>(index) & 31));
- }
-
- void clear(size_t index) {
- flags_[index >> 5] &= ~(1U << (static_cast<uint32>(index) & 31));
- }
-
- // Returns the number of flags that can be set/checked.
- size_t size() const { return sizeof(flags_) * 8; }
-
- private:
- // 256 * 32 = 8192 flags in 1KB.
- uint32 flags_[256];
- DISALLOW_COPY_AND_ASSIGN(FlagArray);
-};
-
-class MultiNotifierWorkerFlagArray : public base::PlatformThread::Delegate {
- public:
- MultiNotifierWorkerFlagArray(size_t count, FlagArray* signals,
- size_t* shared_data, size_t repeats,
- CrossProcessNotification* notifier)
- : count_(count), signals_(signals), shared_data_(shared_data),
- repeats_(repeats), notifier_(notifier) {
- }
- virtual ~MultiNotifierWorkerFlagArray() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- for (size_t i = 0; i < repeats_; ++i) {
- notifier_->Wait();
- for (size_t s = 0; s < count_; ++s) {
- if (signals_->is_set(s)) {
- ++shared_data_[s];
- // We don't clear the flag here but simply leave it signaled because
- // we want the other thread to also increment this variable.
- }
- }
- notifier_->Signal();
- }
- }
-
- private:
- size_t count_;
- FlagArray* signals_;
- size_t* shared_data_;
- size_t repeats_;
- CrossProcessNotification* notifier_;
- DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorkerFlagArray);
-};
-
-} // end namespace
-
-TEST(CrossProcessNotification, FlagArray) {
- FlagArray flags;
- EXPECT_GT(flags.size(), 1000U);
- for (size_t i = 0; i < flags.size(); ++i) {
- EXPECT_FALSE(flags.is_set(i));
- flags.set(i);
- EXPECT_TRUE(flags.is_set(i));
- flags.clear(i);
- EXPECT_FALSE(flags.is_set(i));
- }
-}
-
-// Initializes two notifiers, signals the each one and make sure the others
-// wait is satisfied.
-TEST(CrossProcessNotification, Basic) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
- EXPECT_TRUE(a.IsValid());
- EXPECT_TRUE(b.IsValid());
-
- a.Signal();
- b.Wait();
-
- b.Signal();
- a.Wait();
-}
-
-// Spins two worker threads, each with their own CrossProcessNotification
-// that they use to read and write from a shared memory buffer.
-// Disabled as it trips of the TSAN bot (false positive since TSAN doesn't
-// recognize sockets as being a synchronization primitive).
-TEST(CrossProcessNotification, DISABLED_TwoThreads) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
-
- size_t data = 0;
- const size_t kRepeats = 10000;
- SingleNotifierWorker worker1(&data, kRepeats, &a);
- SingleNotifierWorker worker2(&data, kRepeats, &b);
- base::PlatformThreadHandle thread1, thread2;
- base::PlatformThread::Create(0, &worker1, &thread1);
- base::PlatformThread::Create(0, &worker2, &thread2);
-
- // Start the first thread. They should ping pong a few times and take turns
- // incrementing the shared variable and never step on each other's toes.
- a.Signal();
-
- base::PlatformThread::Join(thread1);
- base::PlatformThread::Join(thread2);
-
- EXPECT_EQ(kRepeats * 2, data);
-}
-
-// Uses a pair of threads to access up to 1000 pieces of synchronized shared
-// data. On regular dev machines, the number of notifiers should be 1000, but on
-// mac and linux bots, the number will be smaller due to the RLIMIT_NOFILE
-// limit. Specifically, linux will have this limit at 1024 which means for this
-// test that the max number of notifiers will be in the range 500-512. On Mac
-// the limit is 256, so |count| will be ~120. Oh, and raising the limit via
-// setrlimit() won't work.
-// DISABLED since the distribution won't be accurate when run on valgrind.
-TEST(CrossProcessNotification, DISABLED_ThousandNotifiersTwoThreads) {
- const size_t kCount = 1000;
- NotificationsOwner pairs(kCount);
- size_t data[kCount] = {0};
- // We use a multiple of the count so that the division in the check below
- // will be nice and round.
- size_t repeats = pairs.size() * 1;
-
- MultiNotifierWorker worker_1(&data[0], repeats, &pairs.a());
- MultiNotifierWorker worker_2(&data[0], repeats, &pairs.b());
- base::PlatformThreadHandle thread_1, thread_2;
- base::PlatformThread::Create(0, &worker_1, &thread_1);
- base::PlatformThread::Create(0, &worker_2, &thread_2);
-
- for (size_t i = 0; i < pairs.size(); ++i)
- pairs.a()[i]->Signal();
-
- base::PlatformThread::Join(thread_1);
- base::PlatformThread::Join(thread_2);
-
- size_t expected_total = pairs.size() * 2;
- size_t total = 0;
- for (size_t i = 0; i < pairs.size(); ++i) {
- // The CrossProcessNotification::WaitForMultiple class should have ensured
- // that all notifiers had the same quality of service.
- EXPECT_EQ(expected_total / pairs.size(), data[i]);
- total += data[i];
- }
- EXPECT_EQ(expected_total, total);
-}
-
-// Functionally equivalent (as far as the shared data goes) to the
-// ThousandNotifiersTwoThreads test but uses a single pair of notifiers +
-// FlagArray for the 1000 signals. This approach is significantly faster.
-// Disabled as it trips of the TSAN bot - "Possible data race during write of
-// size 4" (the flag array).
-TEST(CrossProcessNotification, DISABLED_TwoNotifiersTwoThreads1000Signals) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
-
- const size_t kCount = 1000;
- FlagArray signals;
- ASSERT_GE(signals.size(), kCount);
- size_t data[kCount] = {0};
-
- // Since this algorithm checks all events each time the notifier is
- // signaled, |repeat| doesn't mean the same thing here as it does in
- // ThousandNotifiersTwoThreads. 1 repeat here is the same as kCount
- // repeats in ThousandNotifiersTwoThreads.
- size_t repeats = 1;
- MultiNotifierWorkerFlagArray worker1(kCount, &signals, &data[0], repeats, &a);
- MultiNotifierWorkerFlagArray worker2(kCount, &signals, &data[0], repeats, &b);
- base::PlatformThreadHandle thread1, thread2;
- base::PlatformThread::Create(0, &worker1, &thread1);
- base::PlatformThread::Create(0, &worker2, &thread2);
-
- for (size_t i = 0; i < kCount; ++i)
- signals.set(i);
- a.Signal();
-
- base::PlatformThread::Join(thread1);
- base::PlatformThread::Join(thread2);
-
- size_t expected_total = kCount * 2;
- size_t total = 0;
- for (size_t i = 0; i < kCount; ++i) {
- // Since for each signal, we process all signaled events, the shared data
- // variables should all be equal.
- EXPECT_EQ(expected_total / kCount, data[i]);
- total += data[i];
- }
- EXPECT_EQ(expected_total, total);
-}
-
-// Test the maximum number of notifiers without spinning further wait
-// threads on Windows. This test assumes we can always create 64 pairs and
-// bails if we can't.
-TEST(CrossProcessNotification, MultipleWaits64) {
- const size_t kCount = 64;
- NotificationsOwner pairs(kCount);
- ASSERT_TRUE(pairs.size() == kCount);
-
- CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
- for (size_t i = 0; i < kCount; ++i) {
- pairs.a()[i]->Signal();
- int index = waiter.Wait();
- EXPECT_EQ(i, static_cast<size_t>(index));
- }
-}
-
-// Tests waiting for more notifiers than the OS supports on one thread.
-// The test will create at most 1000 pairs, but on mac/linux bots the actual
-// number will be lower. See comment about the RLIMIT_NOFILE limit above for
-// more details.
-// DISABLED since the distribution won't be accurate when run on valgrind.
-TEST(CrossProcessNotification, DISABLED_MultipleWaits1000) {
- // A 1000 notifiers requires 16 threads on Windows, including the current
- // one, to perform the wait operation.
- const size_t kCount = 1000;
- NotificationsOwner pairs(kCount);
-
- for (size_t i = 0; i < pairs.size(); ++i) {
- pairs.a()[i]->Signal();
- // To disable the load distribution algorithm and force the extra worker
- // thread(s) to catch the signaled event, we define the |waiter| inside
- // the loop.
- CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
- int index = waiter.Wait();
- EXPECT_EQ(i, static_cast<size_t>(index));
- }
-}
-
-class CrossProcessNotificationMultiProcessTest : public base::MultiProcessTest {
-};
-
-namespace {
-
-// A very crude IPC mechanism that we use to set up the spawned child process
-// and the parent process.
-struct CrudeIpc {
- uint8 ready;
- CrossProcessNotification::IPCHandle handle_1;
- CrossProcessNotification::IPCHandle handle_2;
-};
-
-#if defined(OS_POSIX)
-const int kPosixChildSharedMem = 30;
-#else
-const char kSharedMemName[] = "CrossProcessNotificationMultiProcessTest";
-#endif
-
-const size_t kSharedMemSize = 1024;
-
-} // namespace
-
-// The main routine of the child process. Waits for the parent process
-// to copy handles over to the child and then uses a CrossProcessNotification to
-// wait and signal to the parent process.
-MULTIPROCESS_TEST_MAIN(CrossProcessNotificationChildMain) {
-#if defined(OS_POSIX)
- base::SharedMemory mem(
- base::SharedMemoryHandle(kPosixChildSharedMem, true /* auto close */),
- false);
-#else
- base::SharedMemory mem;
- CHECK(mem.CreateNamed(kSharedMemName, true, kSharedMemSize));
-#endif
-
- CHECK(mem.Map(kSharedMemSize));
- CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
-
- while (!ipc->ready)
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
-
- CrossProcessNotification notifier(ipc->handle_1, ipc->handle_2);
- notifier.Wait();
- notifier.Signal();
-
- return 0;
-}
-
-// Spawns a new process and hands a CrossProcessNotification instance to the
-// new process. Once that's done, it waits for the child process to signal
-// it's end and quits.
-TEST_F(CrossProcessNotificationMultiProcessTest, Basic) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
- EXPECT_TRUE(a.IsValid());
- EXPECT_TRUE(b.IsValid());
-
- base::SharedMemory mem;
-
-#if defined(OS_POSIX)
- ASSERT_TRUE(mem.CreateAndMapAnonymous(kSharedMemSize));
-#else
- mem.Delete(kSharedMemName); // In case a previous run was unsuccessful.
- ASSERT_TRUE(mem.CreateNamed(kSharedMemName, false, kSharedMemSize));
- ASSERT_TRUE(mem.Map(kSharedMemSize));
-#endif
-
- CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
- ipc->ready = false;
-
-#if defined(OS_POSIX)
- const int kPosixChildSocket = 20;
- EXPECT_TRUE(b.ShareToProcess(
- base::kNullProcessHandle, &ipc->handle_1, &ipc->handle_2));
- base::FileHandleMappingVector fd_mapping_vec;
- fd_mapping_vec.push_back(std::make_pair(ipc->handle_1.fd, kPosixChildSocket));
- fd_mapping_vec.push_back(
- std::make_pair(mem.handle().fd, kPosixChildSharedMem));
- ipc->handle_1.fd = kPosixChildSocket;
- base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
- fd_mapping_vec, false);
-#else
- base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
- false);
- EXPECT_TRUE(b.ShareToProcess(process, &ipc->handle_1, &ipc->handle_2));
-#endif
-
- ipc->ready = true;
-
- a.Signal();
- a.Wait();
-
- int exit_code = -1;
- base::WaitForExitCode(process, &exit_code);
- EXPECT_EQ(0, exit_code);
-}
diff --git a/chromium/media/audio/cross_process_notification_win.cc b/chromium/media/audio/cross_process_notification_win.cc
deleted file mode 100644
index 53bf0f4525e..00000000000
--- a/chromium/media/audio/cross_process_notification_win.cc
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/platform_thread.h"
-#include "base/win/scoped_handle.h"
-
-CrossProcessNotification::~CrossProcessNotification() {}
-
-CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
- IPCHandle handle_2)
- : mine_(handle_1), other_(handle_2) {
- DCHECK(IsValid());
-}
-
-void CrossProcessNotification::Signal() {
- DCHECK(IsValid());
- DCHECK_EQ(::WaitForSingleObject(mine_, 0), static_cast<DWORD>(WAIT_TIMEOUT))
- << "Are you calling Signal() without calling Wait() first?";
- BOOL ok = ::SetEvent(mine_);
- CHECK(ok);
-}
-
-void CrossProcessNotification::Wait() {
- DCHECK(IsValid());
- DWORD wait = ::WaitForSingleObject(other_, INFINITE);
- DCHECK_EQ(wait, WAIT_OBJECT_0);
- BOOL ok = ::ResetEvent(other_);
- CHECK(ok);
-}
-
-bool CrossProcessNotification::IsValid() const {
- return mine_.IsValid() && other_.IsValid();
-}
-
-bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
- IPCHandle* handle_1,
- IPCHandle* handle_2) {
- DCHECK(IsValid());
- HANDLE our_process = ::GetCurrentProcess();
- if (!::DuplicateHandle(our_process, mine_, process, handle_1, 0, FALSE,
- DUPLICATE_SAME_ACCESS)) {
- return false;
- }
-
- if (!::DuplicateHandle(our_process, other_, process, handle_2, 0, FALSE,
- DUPLICATE_SAME_ACCESS)) {
- // In case we're sharing to ourselves, we can close the handle, but
- // if the target process is a different process, we do nothing.
- if (process == our_process)
- ::CloseHandle(*handle_1);
- *handle_1 = NULL;
- return false;
- }
-
- return true;
-}
-
-// static
-bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b) {
- DCHECK(!a->IsValid());
- DCHECK(!b->IsValid());
-
- bool success = false;
-
- // Create two manually resettable events and give each party a handle
- // to both events.
- HANDLE event_a = ::CreateEvent(NULL, TRUE, FALSE, NULL);
- HANDLE event_b = ::CreateEvent(NULL, TRUE, FALSE, NULL);
- if (event_a && event_b) {
- a->mine_.Set(event_a);
- a->other_.Set(event_b);
- success = a->ShareToProcess(GetCurrentProcess(), &event_a, &event_b);
- if (success) {
- b->mine_.Set(event_b);
- b->other_.Set(event_a);
- } else {
- a->mine_.Close();
- a->other_.Close();
- }
- } else {
- if (event_a)
- ::CloseHandle(event_a);
- if (event_b)
- ::CloseHandle(event_b);
- }
-
- DCHECK(!success || a->IsValid());
- DCHECK(!success || b->IsValid());
-
- return success;
-}
-
-namespace {
-class ExtraWaitThread : public base::PlatformThread::Delegate {
- public:
- ExtraWaitThread(HANDLE stop, HANDLE* events, size_t count,
- int* signaled_event)
- : stop_(stop), events_(events), count_(count),
- signaled_event_(signaled_event) {
- *signaled_event_ = -1;
- }
- virtual ~ExtraWaitThread() {}
-
- virtual void ThreadMain() OVERRIDE {
- // Store the |stop_| event as the first event.
- HANDLE events[MAXIMUM_WAIT_OBJECTS] = { stop_ };
- HANDLE next_thread = NULL;
- DWORD event_count = MAXIMUM_WAIT_OBJECTS;
- int thread_signaled_event = -1;
- scoped_ptr<ExtraWaitThread> extra_wait_thread;
- if (count_ > (MAXIMUM_WAIT_OBJECTS - 1)) {
- std::copy(&events_[0], &events_[MAXIMUM_WAIT_OBJECTS - 2], &events[1]);
-
- extra_wait_thread.reset(new ExtraWaitThread(stop_,
- &events_[MAXIMUM_WAIT_OBJECTS - 2],
- count_ - (MAXIMUM_WAIT_OBJECTS - 2),
- &thread_signaled_event));
- base::PlatformThreadHandle handle;
- base::PlatformThread::Create(0, extra_wait_thread.get(), &handle);
- next_thread = handle.platform_handle();
-
- event_count = MAXIMUM_WAIT_OBJECTS;
- events[MAXIMUM_WAIT_OBJECTS - 1] = next_thread;
- } else {
- std::copy(&events_[0], &events_[count_], &events[1]);
- event_count = count_ + 1;
- }
-
- DWORD wait = ::WaitForMultipleObjects(event_count, &events[0], FALSE,
- INFINITE);
- if (wait >= WAIT_OBJECT_0 && wait < (WAIT_OBJECT_0 + event_count)) {
- wait -= WAIT_OBJECT_0;
- if (wait == 0) {
- // The stop event was signaled. Check if it was signaled by a
- // sub thread. In case our sub thread had to spin another thread (and
- // so on), we must wait for ours to exit before we can check the
- // propagated event offset.
- if (next_thread) {
- base::PlatformThread::Join(base::PlatformThreadHandle(next_thread));
- next_thread = NULL;
- }
- if (thread_signaled_event != -1)
- *signaled_event_ = thread_signaled_event + (MAXIMUM_WAIT_OBJECTS - 2);
- } else if (events[wait] == next_thread) {
- NOTREACHED();
- } else {
- *signaled_event_ = static_cast<int>(wait);
- SetEvent(stop_);
- }
- } else {
- NOTREACHED();
- }
-
- if (next_thread)
- base::PlatformThread::Join(base::PlatformThreadHandle(next_thread));
- }
-
- private:
- HANDLE stop_;
- HANDLE* events_;
- size_t count_;
- int* signaled_event_;
- DISALLOW_COPY_AND_ASSIGN(ExtraWaitThread);
-};
-} // end namespace
-
-// static
-int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
- size_t wait_offset) {
- DCHECK_LT(wait_offset, notifications.size());
-
- for (size_t i = 0; i < notifications.size(); ++i) {
- DCHECK(notifications[i]->IsValid());
- }
-
- // TODO(tommi): Should we wait in an alertable state so that we can be
- // canceled via an APC?
- scoped_ptr<HANDLE[]> handles(new HANDLE[notifications.size()]);
-
- // Because of the way WaitForMultipleObjects works, we do a little trick here.
- // When multiple events are signaled, WaitForMultipleObjects will return the
- // index of the first signaled item (lowest). This means that if we always
- // pass the array the same way to WaitForMultipleObjects, the objects that
- // come first, have higher priority. In times of heavy load, this will cause
- // elements at the back to become DOS-ed.
- // So, we store the location of the item that was last signaled. Then we split
- // up the array and move everything higher than the last signaled index to the
- // front and the rest to the back (meaning that the last signaled item will
- // become the last element in the list).
- // Assuming equally busy events, this approach distributes the priority
- // evenly.
-
- size_t index = 0;
- for (size_t i = wait_offset; i < notifications.size(); ++i)
- handles[index++] = notifications[i]->other_;
-
- for (size_t i = 0; i < wait_offset; ++i)
- handles[index++] = notifications[i]->other_;
- DCHECK_EQ(index, notifications.size());
-
- DWORD wait = WAIT_FAILED;
- bool wait_failed = false;
- if (notifications.size() <= MAXIMUM_WAIT_OBJECTS) {
- wait = ::WaitForMultipleObjects(notifications.size(), &handles[0], FALSE,
- INFINITE);
- wait_failed = wait < WAIT_OBJECT_0 ||
- wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
- } else {
- // Used to stop the other wait threads when an event has been signaled.
- base::win::ScopedHandle stop(::CreateEvent(NULL, TRUE, FALSE, NULL));
-
- // Create the first thread and pass a pointer to all handles >63
- // to the thread + 'stop'. Then implement the thread so that it checks
- // if the number of handles is > 63. If so, spawns a new thread and
- // passes >62 handles to that thread and waits for the 62 handles + stop +
- // next thread. etc etc.
-
- // Create a list of threads so that each thread waits on at most 62 events
- // including one event for when a child thread signals completion and one
- // event for when all of the threads must be stopped (due to some event
- // being signaled).
-
- int thread_signaled_event = -1;
- ExtraWaitThread wait_thread(stop, &handles[MAXIMUM_WAIT_OBJECTS - 1],
- notifications.size() - (MAXIMUM_WAIT_OBJECTS - 1),
- &thread_signaled_event);
- base::PlatformThreadHandle thread;
- base::PlatformThread::Create(0, &wait_thread, &thread);
- HANDLE events[MAXIMUM_WAIT_OBJECTS];
- std::copy(&handles[0], &handles[MAXIMUM_WAIT_OBJECTS - 1], &events[0]);
- events[MAXIMUM_WAIT_OBJECTS - 1] = thread.platform_handle();
- wait = ::WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, &events[0], FALSE,
- INFINITE);
- wait_failed = wait < WAIT_OBJECT_0 ||
- wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
- if (wait == WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 1)) {
- if (thread_signaled_event < 0) {
- wait_failed = true;
- NOTREACHED();
- } else {
- wait = WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 2) +
- thread_signaled_event;
- }
- } else {
- ::SetEvent(stop);
- }
- base::PlatformThread::Join(thread);
- }
-
- int ret = -1;
- if (!wait_failed) {
- // Subtract to be politically correct (WAIT_OBJECT_0 is actually 0).
- wait -= WAIT_OBJECT_0;
- BOOL ok = ::ResetEvent(handles[wait]);
- CHECK(ok);
- ret = (wait + wait_offset) % notifications.size();
- DCHECK_EQ(handles[wait], notifications[ret]->other_.Get());
- } else {
- NOTREACHED();
- }
-
- CHECK_NE(ret, -1);
- return ret;
-}
diff --git a/chromium/media/audio/ios/audio_manager_ios.h b/chromium/media/audio/ios/audio_manager_ios.h
deleted file mode 100644
index 19751502fd2..00000000000
--- a/chromium/media/audio/ios/audio_manager_ios.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-#define MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-
-#include "base/basictypes.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class PCMQueueInAudioInputStream;
-
-// iOS implementation of the AudioManager singleton. Supports only audio input.
-class MEDIA_EXPORT AudioManagerIOS : public AudioManagerBase {
- public:
- AudioManagerIOS();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params,
- const std::string& input_device_id) OVERRIDE;
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioParameters GetInputStreamParameters(
- const std::string& device_id) OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params,
- const std::string& input_device_id) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
- virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
-
- protected:
- virtual ~AudioManagerIOS();
-
- virtual AudioParameters GetPreferredOutputStreamParameters(
- const AudioParameters& input_params) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioManagerIOS);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
diff --git a/chromium/media/audio/ios/audio_manager_ios.mm b/chromium/media/audio/ios/audio_manager_ios.mm
deleted file mode 100644
index 49479302efc..00000000000
--- a/chromium/media/audio/ios/audio_manager_ios.mm
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/ios/audio_manager_ios.h"
-
-#import <AudioToolbox/AudioToolbox.h>
-#import <AVFoundation/AVFoundation.h>
-
-#include "base/sys_info.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/fake_audio_input_stream.h"
-#include "media/audio/ios/audio_session_util_ios.h"
-#include "media/audio/mac/audio_input_mac.h"
-#include "media/base/channel_layout.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-enum { kMaxInputChannels = 2 };
-
-AudioManagerIOS::AudioManagerIOS() {
-}
-
-AudioManagerIOS::~AudioManagerIOS() {
- Shutdown();
-}
-
-bool AudioManagerIOS::HasAudioOutputDevices() {
- return false;
-}
-
-bool AudioManagerIOS::HasAudioInputDevices() {
- if (!InitAudioSessionIOS())
- return false;
- // Note that the |kAudioSessionProperty_AudioInputAvailable| property is a
- // 32-bit integer, not a boolean.
- UInt32 property_size;
- OSStatus error =
- AudioSessionGetPropertySize(kAudioSessionProperty_AudioInputAvailable,
- &property_size);
- if (error != kAudioSessionNoError)
- return false;
- UInt32 audio_input_is_available = false;
- DCHECK(property_size == sizeof(audio_input_is_available));
- error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable,
- &property_size,
- &audio_input_is_available);
- return error == kAudioSessionNoError ? audio_input_is_available : false;
-}
-
-AudioParameters AudioManagerIOS::GetInputStreamParameters(
- const std::string& device_id) {
- // TODO(xians): figure out the right input sample rate and buffer size to
- // achieve the best audio performance for iOS devices.
- // TODO(xians): query the native channel layout for the specific device.
- static const int kDefaultSampleRate = 48000;
- static const int kDefaultBufferSize = 2048;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultBufferSize);
-}
-
-AudioOutputStream* AudioManagerIOS::MakeAudioOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
- // Current line of iOS devices has only one audio input.
- // Ignore the device_id (unittest uses a test value in it).
- if (!params.IsValid() || (params.channels() > kMaxInputChannels))
- return NULL;
-
- if (params.format() == AudioParameters::AUDIO_FAKE)
- return FakeAudioInputStream::MakeFakeStream(this, params);
- else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR)
- return new PCMQueueInAudioInputStream(this, params);
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLinearOutputStream(
- const AudioParameters& params) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- return MakeAudioInputStream(params, device_id);
-}
-
-AudioInputStream* AudioManagerIOS::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- NOTIMPLEMENTED(); // Only linear audio input is supported on iOS.
- return MakeAudioInputStream(params, device_id);
-}
-
-
-AudioParameters AudioManagerIOS::GetPreferredOutputStreamParameters(
- const AudioParameters& input_params) {
- // TODO(xians): handle the case when input_params is valid.
- // TODO(xians): figure out the right output sample rate and sample rate to
- // achieve the best audio performance for iOS devices.
- // TODO(xians): add support to --audio-buffer-size flag.
- static const int kDefaultSampleRate = 48000;
- static const int kDefaultBufferSize = 2048;
- if (input_params.IsValid()) {
- NOTREACHED();
- }
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultBufferSize);
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseOutputStream(AudioOutputStream* stream) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseInputStream(AudioInputStream* stream) {
- delete stream;
-}
-
-// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerIOS();
-}
-
-} // namespace media
diff --git a/chromium/media/audio/ios/audio_manager_ios_unittest.cc b/chromium/media/audio/ios/audio_manager_ios_unittest.cc
deleted file mode 100644
index 30ebc04f204..00000000000
--- a/chromium/media/audio/ios/audio_manager_ios_unittest.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using namespace media;
-
-// Test that input is supported and output is not.
-TEST(IOSAudioTest, AudioSupport) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- ASSERT_TRUE(NULL != audio_manager.get());
- ASSERT_FALSE(audio_manager->HasAudioOutputDevices());
- ASSERT_TRUE(audio_manager->HasAudioInputDevices());
-}
-
-// Test that input stream can be opened and closed.
-TEST(IOSAudioTest, InputStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- ASSERT_TRUE(NULL != audio_manager.get());
- if (!audio_manager->HasAudioInputDevices())
- return;
- AudioInputStream* ias = audio_manager->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024),
- std::string("test_device"));
- ASSERT_TRUE(NULL != ias);
- EXPECT_TRUE(ias->Open());
- ias->Close();
-}
diff --git a/chromium/media/audio/ios/audio_session_util_ios.h b/chromium/media/audio/ios/audio_session_util_ios.h
deleted file mode 100644
index 175db91fae0..00000000000
--- a/chromium/media/audio/ios/audio_session_util_ios.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
-#define MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
-
-namespace media {
-
-// Initializes and configures the audio session, returning a bool indicating
-// whether initialization was successful. Can be called multiple times.
-// Safe to call from any thread.
-bool InitAudioSessionIOS();
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
diff --git a/chromium/media/audio/ios/audio_session_util_ios.mm b/chromium/media/audio/ios/audio_session_util_ios.mm
deleted file mode 100644
index a4071a04cc1..00000000000
--- a/chromium/media/audio/ios/audio_session_util_ios.mm
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/ios/audio_session_util_ios.h"
-
-#include <AVFoundation/AVFoundation.h>
-
-#include "base/logging.h"
-
-namespace media {
-
-bool InitAudioSessionIOS() {
- static bool kSessionInitialized = false;
- static dispatch_once_t once = 0;
- dispatch_once(&once, ^{
- OSStatus error = AudioSessionInitialize(NULL, NULL, NULL, NULL);
- if (error != kAudioSessionNoError)
- DLOG(ERROR) << "AudioSessionInitialize OSStatus error: " << error;
- BOOL result = [[AVAudioSession sharedInstance]
- setCategory:AVAudioSessionCategoryPlayAndRecord
- error:nil];
- if (!result)
- DLOG(ERROR) << "AVAudioSession setCategory failed";
- UInt32 allowMixing = true;
- AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryMixWithOthers,
- sizeof(allowMixing), &allowMixing);
- UInt32 defaultToSpeaker = true;
- AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- sizeof(defaultToSpeaker),
- &defaultToSpeaker);
- // Speech input cannot be used if either of these two conditions fail.
- kSessionInitialized = (error == kAudioSessionNoError) && result;
- });
- return kSessionInitialized;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/linux/alsa_output_unittest.cc b/chromium/media/audio/linux/alsa_output_unittest.cc
index 32456360f47..82fbab94c19 100644
--- a/chromium/media/audio/linux/alsa_output_unittest.cc
+++ b/chromium/media/audio/linux/alsa_output_unittest.cc
@@ -83,8 +83,10 @@ class MockAudioManagerLinux : public AudioManagerLinux {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
index 38253e2e651..708e4f26840 100644
--- a/chromium/media/audio/linux/audio_manager_linux.cc
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -42,9 +42,9 @@ static const int kDefaultSampleRate = 48000;
// hence surround devices are not stored in the list.
static const char* kInvalidAudioInputDevices[] = {
"default",
+ "dmix",
"null",
"pulse",
- "dmix",
"surround",
};
@@ -103,9 +103,15 @@ void AudioManagerLinux::ShowAudioInputSettings() {
}
void AudioManagerLinux::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+ AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAlsaAudioDevices(kStreamCapture, device_names);
+}
+
+void AudioManagerLinux::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
- GetAlsaAudioInputDevices(device_names);
+ GetAlsaAudioDevices(kStreamPlayback, device_names);
}
AudioParameters AudioManagerLinux::GetInputStreamParameters(
@@ -117,7 +123,8 @@ AudioParameters AudioManagerLinux::GetInputStreamParameters(
kDefaultSampleRate, 16, kDefaultInputBufferSize);
}
-void AudioManagerLinux::GetAlsaAudioInputDevices(
+void AudioManagerLinux::GetAlsaAudioDevices(
+ StreamType type,
media::AudioDeviceNames* device_names) {
// Constants specified by the ALSA API for device hints.
static const char kPcmInterfaceName[] = "pcm";
@@ -128,37 +135,40 @@ void AudioManagerLinux::GetAlsaAudioInputDevices(
void** hints = NULL;
int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
if (!error) {
- GetAlsaDevicesInfo(hints, device_names);
+ GetAlsaDevicesInfo(type, hints, device_names);
// Destroy the hints now that we're done with it.
wrapper_->DeviceNameFreeHint(hints);
} else {
- DLOG(WARNING) << "GetAudioInputDevices: unable to get device hints: "
+ DLOG(WARNING) << "GetAlsaAudioDevices: unable to get device hints: "
<< wrapper_->StrError(error);
}
}
}
void AudioManagerLinux::GetAlsaDevicesInfo(
- void** hints, media::AudioDeviceNames* device_names) {
+ AudioManagerLinux::StreamType type,
+ void** hints,
+ media::AudioDeviceNames* device_names) {
static const char kIoHintName[] = "IOID";
static const char kNameHintName[] = "NAME";
static const char kDescriptionHintName[] = "DESC";
- static const char kOutputDevice[] = "Output";
+
+ const char* unwanted_device_type = UnwantedDeviceTypeWhenEnumerating(type);
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are input capable. Valid values are
+ // Only examine devices of the right type. Valid values are
// "Input", "Output", and NULL which means both input and output.
scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
kIoHintName));
- if (io != NULL && strcmp(kOutputDevice, io.get()) == 0)
+ if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
continue;
- // Found an input device, prepend the default device since we always want
- // it to be on the top of the list for all platforms. And there is no
- // duplicate counting here since it is only done if the list is still empty.
- // Note, pulse has exclusively opened the default device, so we must open
- // the device via the "default" moniker.
+ // Found a device, prepend the default device since we always want
+ // it to be on the top of the list for all platforms. And there is
+ // no duplicate counting here since it is only done if the list is
+ // still empty. Note, pulse has exclusively opened the default
+ // device, so we must open the device via the "default" moniker.
if (device_names->empty()) {
device_names->push_front(media::AudioDeviceName(
AudioManagerBase::kDefaultDeviceName,
@@ -170,7 +180,7 @@ void AudioManagerLinux::GetAlsaDevicesInfo(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
// Find out if the device is available.
- if (IsAlsaDeviceAvailable(unique_device_name.get())) {
+ if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
// Get the description for the device.
scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
*hint_iter, kDescriptionHintName));
@@ -196,25 +206,46 @@ void AudioManagerLinux::GetAlsaDevicesInfo(
}
}
-bool AudioManagerLinux::IsAlsaDeviceAvailable(const char* device_name) {
+// static
+bool AudioManagerLinux::IsAlsaDeviceAvailable(
+ AudioManagerLinux::StreamType type,
+ const char* device_name) {
if (!device_name)
return false;
- // Check if the device is in the list of invalid devices.
- for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
- if (strncmp(kInvalidAudioInputDevices[i], device_name,
- strlen(kInvalidAudioInputDevices[i])) == 0)
- return false;
+ // We do prefix matches on the device name to see whether to include
+ // it or not.
+ if (type == kStreamCapture) {
+ // Check if the device is in the list of invalid devices.
+ for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
+ if (strncmp(kInvalidAudioInputDevices[i], device_name,
+ strlen(kInvalidAudioInputDevices[i])) == 0)
+ return false;
+ }
+ return true;
+ } else {
+ DCHECK_EQ(kStreamPlayback, type);
+ // We prefer the device type that maps straight to hardware but
+ // goes through software conversion if needed (e.g. incompatible
+ // sample rate).
+ // TODO(joi): Should we prefer "hw" instead?
+ static const char kDeviceTypeDesired[] = "plughw";
+ return strncmp(kDeviceTypeDesired,
+ device_name,
+ arraysize(kDeviceTypeDesired) - 1) == 0;
}
+}
- return true;
+// static
+const char* AudioManagerLinux::UnwantedDeviceTypeWhenEnumerating(
+ AudioManagerLinux::StreamType wanted_type) {
+ return wanted_type == kStreamPlayback ? "Input" : "Output";
}
-bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
+bool AudioManagerLinux::HasAnyAlsaAudioDevice(
+ AudioManagerLinux::StreamType stream) {
static const char kPcmInterfaceName[] = "pcm";
static const char kIoHintName[] = "IOID";
- const char* kNotWantedDevice =
- (stream == kStreamPlayback ? "Input" : "Output");
void** hints = NULL;
bool has_device = false;
int card = -1;
@@ -230,7 +261,8 @@ bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
// "Input", "Output", and NULL which means both input and output.
scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
kIoHintName));
- if (io != NULL && strcmp(kNotWantedDevice, io.get()) == 0)
+ const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
+ if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
continue; // Wrong type, skip the device.
// Found an input device.
@@ -258,7 +290,9 @@ AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(xians): Use input_device_id for unified IO.
return MakeOutputStream(params);
@@ -277,7 +311,10 @@ AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerLinux::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 2048;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = kDefaultSampleRate;
diff --git a/chromium/media/audio/linux/audio_manager_linux.h b/chromium/media/audio/linux/audio_manager_linux.h
index 28abaa116e7..ab284dfdce9 100644
--- a/chromium/media/audio/linux/audio_manager_linux.h
+++ b/chromium/media/audio/linux/audio_manager_linux.h
@@ -25,8 +25,10 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
@@ -35,6 +37,7 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -45,6 +48,7 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
virtual ~AudioManagerLinux();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
@@ -53,14 +57,22 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
kStreamCapture,
};
- // Gets a list of available ALSA input devices.
- void GetAlsaAudioInputDevices(media::AudioDeviceNames* device_names);
+ // Gets a list of available ALSA devices.
+ void GetAlsaAudioDevices(StreamType type,
+ media::AudioDeviceNames* device_names);
- // Gets the ALSA devices' names and ids.
- void GetAlsaDevicesInfo(void** hint, media::AudioDeviceNames* device_names);
+ // Gets the ALSA devices' names and ids that support streams of the
+ // given type.
+ void GetAlsaDevicesInfo(StreamType type,
+ void** hint,
+ media::AudioDeviceNames* device_names);
// Checks if the specific ALSA device is available.
- bool IsAlsaDeviceAvailable(const char* device_name);
+ static bool IsAlsaDeviceAvailable(StreamType type,
+ const char* device_name);
+
+ static const char* UnwantedDeviceTypeWhenEnumerating(
+ StreamType wanted_type);
// Returns true if a device is present for the given stream type.
bool HasAnyAlsaAudioDevice(StreamType stream);
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
index b4cf8c64cc6..9b699ff10f8 100644
--- a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -101,7 +101,7 @@ class AudioOutputStreamWrapper {
samples_per_packet_);
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params,
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(aos);
return aos;
}
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
index 06af6d11c12..7930567fd9c 100644
--- a/chromium/media/audio/mac/audio_input_mac.cc
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -4,15 +4,14 @@
#include "media/audio/mac/audio_input_mac.h"
+#include <CoreServices/CoreServices.h>
+
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_util.h"
-#if !defined(OS_IOS)
-#include <CoreServices/CoreServices.h>
-#endif
namespace media {
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index 17a87b0a7dc..d97f453ca99 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -35,7 +35,9 @@ static std::ostream& operator<<(std::ostream& os,
// for more details and background regarding this implementation.
AUAudioInputStream::AUAudioInputStream(
- AudioManagerMac* manager, const AudioParameters& params,
+ AudioManagerMac* manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
AudioDeviceID audio_device_id)
: manager_(manager),
sink_(NULL),
@@ -48,15 +50,15 @@ AUAudioInputStream::AUAudioInputStream(
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
- format_.mSampleRate = params.sample_rate();
+ format_.mSampleRate = input_params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
+ format_.mBitsPerChannel = input_params.bits_per_sample();
+ format_.mChannelsPerFrame = input_params.channels();
format_.mFramesPerPacket = 1; // uncompressed audio
format_.mBytesPerPacket = (format_.mBitsPerChannel *
- params.channels()) / 8;
+ input_params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
@@ -68,10 +70,7 @@ AUAudioInputStream::AUAudioInputStream(
// Note that we use the same native buffer size as for the output side here
// since the AUHAL implementation requires that both capture and render side
// use the same buffer size. See http://crbug.com/154352 for more details.
- // TODO(xians): Get the audio parameters from the right device.
- const AudioParameters parameters =
- manager_->GetInputStreamParameters(AudioManagerBase::kDefaultDeviceId);
- number_of_frames_ = parameters.frames_per_buffer();
+ number_of_frames_ = output_params.frames_per_buffer();
DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
// Derive size (in bytes) of the buffers that we will render to.
@@ -85,7 +84,7 @@ AUAudioInputStream::AUAudioInputStream(
audio_buffer_list_.mNumberBuffers = 1;
AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
- audio_buffer->mNumberChannels = params.channels();
+ audio_buffer->mNumberChannels = input_params.channels();
audio_buffer->mDataByteSize = data_byte_size;
audio_buffer->mData = audio_data_buffer_.get();
@@ -93,9 +92,16 @@ AUAudioInputStream::AUAudioInputStream(
// until a requested size is ready to be sent to the client.
// It is not possible to ask for less than |kAudioFramesPerCallback| number of
// audio frames.
- const size_t requested_size_frames =
- params.GetBytesPerBuffer() / format_.mBytesPerPacket;
- DCHECK_GE(requested_size_frames, number_of_frames_);
+ size_t requested_size_frames =
+ input_params.GetBytesPerBuffer() / format_.mBytesPerPacket;
+ if (requested_size_frames < number_of_frames_) {
+ // For devices that only support a low sample rate like 8kHz, we adjust the
+ // buffer size to match number_of_frames_. The value of number_of_frames_
+ // in this case has not been calculated based on hardware settings but
+ // rather our hardcoded defaults (see ChooseBufferSize).
+ requested_size_frames = number_of_frames_;
+ }
+
requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
DLOG_IF(INFO, requested_size_frames > number_of_frames_) << "FIFO is used";
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
index 736bf082f5b..04592d2cecf 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.h
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -57,7 +57,8 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// The ctor takes all the usual parameters, plus |manager| which is the
// the audio manager who is creating this object.
AUAudioInputStream(AudioManagerMac* manager,
- const AudioParameters& params,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
AudioDeviceID audio_device_id);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index c0c18ee2cce..8e4b969854e 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -81,11 +81,10 @@ bool AudioManagerMac::HasUnifiedDefaultIO() {
return input_id == output_id;
}
+// Retrieves information on audio devices, and prepends the default
+// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
media::AudioDeviceNames* device_names) {
- DCHECK(device_names);
- device_names->clear();
-
// Query the number of total devices.
AudioObjectPropertyAddress property_address = {
kAudioHardwarePropertyDevices,
@@ -176,6 +175,16 @@ static void GetAudioDeviceInfo(bool is_input,
if (name)
CFRelease(name);
}
+
+ if (!device_names->empty()) {
+ // Prepend the default device to the list since we always want it to be
+ // on the top of the list for all platforms. There is no duplicate
+ // counting here since the default device has been abstracted out before.
+ media::AudioDeviceName name;
+ name.device_name = AudioManagerBase::kDefaultDeviceName;
+ name.unique_id = AudioManagerBase::kDefaultDeviceId;
+ device_names->push_front(name);
+ }
}
static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
@@ -189,7 +198,7 @@ static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
UInt32 device_size = sizeof(audio_device_id);
OSStatus result = -1;
- if (device_id == AudioManagerBase::kDefaultDeviceId) {
+ if (device_id == AudioManagerBase::kDefaultDeviceId || device_id.empty()) {
// Default Device.
property_address.mSelector = is_input ?
kAudioHardwarePropertyDefaultInputDevice :
@@ -263,7 +272,7 @@ bool AudioManagerMac::HasAudioInputDevices() {
return HasAudioHardware(kAudioHardwarePropertyDefaultInputDevice);
}
-// TODO(crogers): There are several places on the OSX specific code which
+// TODO(xians): There are several places on the OSX specific code which
// could benefit from these helper functions.
bool AudioManagerMac::GetDefaultInputDevice(
AudioDeviceID* device) {
@@ -397,16 +406,14 @@ int AudioManagerMac::HardwareSampleRate() {
void AudioManagerMac::GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
GetAudioDeviceInfo(true, device_names);
- if (!device_names->empty()) {
- // Prepend the default device to the list since we always want it to be
- // on the top of the list for all platforms. There is no duplicate
- // counting here since the default device has been abstracted out before.
- media::AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
- name.unique_id = AudioManagerBase::kDefaultDeviceId;
- device_names->push_front(name);
- }
+}
+
+void AudioManagerMac::GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAudioDeviceInfo(false, device_names);
}
AudioParameters AudioManagerMac::GetInputStreamParameters(
@@ -443,21 +450,86 @@ AudioParameters AudioManagerMac::GetInputStreamParameters(
sample_rate, 16, buffer_size);
}
+std::string AudioManagerMac::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(true, input_device_id);
+ if (device == kAudioObjectUnknown)
+ return std::string();
+
+ UInt32 size = 0;
+ AudioObjectPropertyAddress pa = {
+ kAudioDevicePropertyRelatedDevices,
+ kAudioDevicePropertyScopeOutput,
+ kAudioObjectPropertyElementMaster
+ };
+ OSStatus result = AudioObjectGetPropertyDataSize(device, &pa, 0, 0, &size);
+ if (result || !size)
+ return std::string();
+
+ int device_count = size / sizeof(AudioDeviceID);
+ scoped_ptr_malloc<AudioDeviceID>
+ devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ result = AudioObjectGetPropertyData(
+ device, &pa, 0, NULL, &size, devices.get());
+ if (result)
+ return std::string();
+
+ for (int i = 0; i < device_count; ++i) {
+ // Get the number of output channels of the device.
+ pa.mSelector = kAudioDevicePropertyStreams;
+ size = 0;
+ result = AudioObjectGetPropertyDataSize(devices.get()[i],
+ &pa,
+ 0,
+ NULL,
+ &size);
+ if (result || !size)
+ continue; // Skip if there aren't any output channels.
+
+ // Get device UID.
+ CFStringRef uid = NULL;
+ size = sizeof(uid);
+ pa.mSelector = kAudioDevicePropertyDeviceUID;
+ result = AudioObjectGetPropertyData(devices.get()[i],
+ &pa,
+ 0,
+ NULL,
+ &size,
+ &uid);
+ if (result || !uid)
+ continue;
+
+ std::string ret(base::SysCFStringRefToUTF8(uid));
+ CFRelease(uid);
+ return ret;
+ }
+
+ // No matching device found.
+ return std::string();
+}
+
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string());
+ return MakeLowLatencyOutputStream(params, std::string(), std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
// Handle basic output with no input channels.
if (params.input_channels() == 0) {
- AudioDeviceID device = kAudioObjectUnknown;
- GetDefaultOutputDevice(&device);
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
+ return NULL;
+ }
return new AUHALStream(this, params, device);
}
- // TODO(crogers): support more than stereo input.
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+
+ // TODO(xians): support more than stereo input.
if (params.input_channels() != 2) {
// WebAudio is currently hard-coded to 2 channels so we should not
// see this case.
@@ -494,7 +566,7 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
// different and arbitrary combinations of input and output devices
// even running at different sample-rates.
// kAudioDeviceUnknown translates to "use default" here.
- // TODO(crogers): consider tracking UMA stats on AUHALStream
+ // TODO(xians): consider tracking UMA stats on AUHALStream
// versus AudioSynchronizedStream.
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
if (audio_device_id == kAudioObjectUnknown)
@@ -506,6 +578,33 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
kAudioDeviceUnknown);
}
+std::string AudioManagerMac::GetDefaultOutputDeviceID() {
+ AudioDeviceID device_id = kAudioObjectUnknown;
+ if (!GetDefaultOutputDevice(&device_id))
+ return std::string();
+
+ const AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyDeviceUID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+ CFStringRef device_uid = NULL;
+ UInt32 size = sizeof(device_uid);
+ OSStatus status = AudioObjectGetPropertyData(device_id,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &device_uid);
+ if (status != kAudioHardwareNoError || !device_uid)
+ return std::string();
+
+ std::string ret(base::SysCFStringRefToUTF8(device_uid));
+ CFRelease(device_uid);
+
+ return ret;
+}
+
AudioInputStream* AudioManagerMac::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
@@ -515,27 +614,47 @@ AudioInputStream* AudioManagerMac::MakeLinearInputStream(
AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- // Gets the AudioDeviceID that refers to the AudioOutputDevice with the device
+ // Gets the AudioDeviceID that refers to the AudioInputDevice with the device
// unique id. This AudioDeviceID is used to set the device for Audio Unit.
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
AudioInputStream* stream = NULL;
- if (audio_device_id != kAudioObjectUnknown)
- stream = new AUAudioInputStream(this, params, audio_device_id);
+ if (audio_device_id != kAudioObjectUnknown) {
+ // AUAudioInputStream needs to be fed the preferred audio output parameters
+ // of the matching device so that the buffer size of both input and output
+ // can be matched. See constructor of AUAudioInputStream for more.
+ const std::string associated_output_device(
+ GetAssociatedOutputDeviceID(device_id));
+ const AudioParameters output_params =
+ GetPreferredOutputStreamParameters(
+ associated_output_device.empty() ?
+ AudioManagerBase::kDefaultDeviceId : associated_output_device,
+ params);
+ stream = new AUAudioInputStream(this, params, output_params,
+ audio_device_id);
+ }
return stream;
}
AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Invalid output device " << output_device_id;
+ return AudioParameters();
+ }
+
int hardware_channels = 2;
- if (!GetDefaultOutputChannels(&hardware_channels)) {
+ if (!GetDeviceChannels(device, kAudioDevicePropertyScopeOutput,
+ &hardware_channels)) {
// Fallback to stereo.
hardware_channels = 2;
}
ChannelLayout channel_layout = GuessChannelLayout(hardware_channels);
- const int hardware_sample_rate = AUAudioOutputStream::HardwareSampleRate();
+ const int hardware_sample_rate = HardwareSampleRateForDevice(device);
const int buffer_size = ChooseBufferSize(hardware_sample_rate);
int input_channels = 0;
@@ -543,7 +662,7 @@ AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
input_channels = input_params.input_channels();
if (input_channels > 0) {
- // TODO(crogers): given the limitations of the AudioOutputStream
+ // TODO(xians): given the limitations of the AudioOutputStream
// back-ends used with synchronized I/O, we hard-code to stereo.
// Specifically, this is a limitation of AudioSynchronizedStream which
// can be removed as part of the work to consolidate these back-ends.
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index cd3cc2e94b5..d162554b405 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -27,21 +27,27 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
// Implementation of AudioManagerBase.
virtual AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
static bool GetDefaultInputDevice(AudioDeviceID* device);
static bool GetDefaultOutputDevice(AudioDeviceID* device);
@@ -64,6 +70,7 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
virtual ~AudioManagerMac();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/chromium/media/audio/mock_audio_manager.cc b/chromium/media/audio/mock_audio_manager.cc
index 60898bd61b8..a164332a64a 100644
--- a/chromium/media/audio/mock_audio_manager.cc
+++ b/chromium/media/audio/mock_audio_manager.cc
@@ -33,18 +33,24 @@ void MockAudioManager::ShowAudioInputSettings() {
}
void MockAudioManager::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+ AudioDeviceNames* device_names) {
+}
+
+void MockAudioManager::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
- const media::AudioParameters& params,
- const std::string& input_device_id) {
+ const media::AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
NOTREACHED();
return NULL;
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
NOTREACHED();
return NULL;
@@ -77,9 +83,19 @@ AudioParameters MockAudioManager::GetDefaultOutputStreamParameters() {
return AudioParameters();
}
+AudioParameters MockAudioManager::GetOutputStreamParameters(
+ const std::string& device_id) {
+ return AudioParameters();
+}
+
AudioParameters MockAudioManager::GetInputStreamParameters(
const std::string& device_id) {
return AudioParameters();
}
+std::string MockAudioManager::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ return std::string();
+}
+
} // namespace media.
diff --git a/chromium/media/audio/mock_audio_manager.h b/chromium/media/audio/mock_audio_manager.h
index eee84b1643f..7bc30f578e7 100644
--- a/chromium/media/audio/mock_audio_manager.h
+++ b/chromium/media/audio/mock_audio_manager.h
@@ -34,12 +34,17 @@ class MockAudioManager : public media::AudioManager {
virtual void GetAudioInputDeviceNames(
media::AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ media::AudioDeviceNames* device_names) OVERRIDE;
+
virtual media::AudioOutputStream* MakeAudioOutputStream(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual media::AudioInputStream* MakeAudioInputStream(
@@ -55,8 +60,12 @@ class MockAudioManager : public media::AudioManager {
AudioDeviceListener* listener) OVERRIDE;
virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+ virtual AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
private:
virtual ~MockAudioManager();
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
index 4005aeb98f0..a97ea8f625e 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
@@ -92,7 +92,9 @@ AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
return MakeOutputStream(params);
}
@@ -112,7 +114,10 @@ AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
index a1adcb6c86c..e4bb3948d28 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -27,6 +27,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -37,6 +38,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
virtual ~AudioManagerOpenBSD();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index dcdd3282228..5c09f149057 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -66,19 +66,13 @@ AudioManagerPulse::~AudioManagerPulse() {
// Implementation of AudioManager.
bool AudioManagerPulse::HasAudioOutputDevices() {
- DCHECK(input_mainloop_);
- DCHECK(input_context_);
- media::AudioDeviceNames devices;
- AutoPulseLock auto_lock(input_mainloop_);
- devices_ = &devices;
- pa_operation* operation = pa_context_get_sink_info_list(
- input_context_, OutputDevicesInfoCallback, this);
- WaitForOperationCompletion(input_mainloop_, operation);
+ AudioDeviceNames devices;
+ GetAudioOutputDeviceNames(&devices);
return !devices.empty();
}
bool AudioManagerPulse::HasAudioInputDevices() {
- media::AudioDeviceNames devices;
+ AudioDeviceNames devices;
GetAudioInputDeviceNames(&devices);
return !devices.empty();
}
@@ -87,18 +81,24 @@ void AudioManagerPulse::ShowAudioInputSettings() {
AudioManagerLinux::ShowLinuxAudioInputSettings();
}
-void AudioManagerPulse::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+void AudioManagerPulse::GetAudioDeviceNames(
+ bool input, media::AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
DCHECK(input_mainloop_);
DCHECK(input_context_);
AutoPulseLock auto_lock(input_mainloop_);
devices_ = device_names;
- pa_operation* operation = pa_context_get_source_info_list(
+ pa_operation* operation = NULL;
+ if (input) {
+ operation = pa_context_get_source_info_list(
input_context_, InputDevicesInfoCallback, this);
+ } else {
+ operation = pa_context_get_sink_info_list(
+ input_context_, OutputDevicesInfoCallback, this);
+ }
WaitForOperationCompletion(input_mainloop_, operation);
- // Append the default device on the top of the list if the list is not empty.
+ // Prepend the default device if the list is not empty.
if (!device_names->empty()) {
device_names->push_front(
AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
@@ -106,6 +106,16 @@ void AudioManagerPulse::GetAudioInputDeviceNames(
}
}
+void AudioManagerPulse::GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNames(true, device_names);
+}
+
+void AudioManagerPulse::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNames(false, device_names);
+}
+
AudioParameters AudioManagerPulse::GetInputStreamParameters(
const std::string& device_id) {
static const int kDefaultInputBufferSize = 1024;
@@ -123,7 +133,10 @@ AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
}
AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return MakeOutputStream(params, input_device_id);
}
@@ -141,7 +154,10 @@ AudioInputStream* AudioManagerPulse::MakeLowLatencyInputStream(
}
AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
static const int kDefaultOutputBufferSize = 512;
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
@@ -286,8 +302,8 @@ void AudioManagerPulse::InputDevicesInfoCallback(pa_context* context,
// Exclude the output devices.
if (info->monitor_of_sink == PA_INVALID_INDEX) {
- manager->devices_->push_back(media::AudioDeviceName(info->description,
- info->name));
+ manager->devices_->push_back(AudioDeviceName(info->description,
+ info->name));
}
}
@@ -302,8 +318,8 @@ void AudioManagerPulse::OutputDevicesInfoCallback(pa_context* context,
return;
}
- manager->devices_->push_back(media::AudioDeviceName(info->description,
- info->name));
+ manager->devices_->push_back(AudioDeviceName(info->description,
+ info->name));
}
void AudioManagerPulse::SampleRateInfoCallback(pa_context* context,
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.h b/chromium/media/audio/pulse/audio_manager_pulse.h
index 6dfebaeff39..36396639929 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.h
+++ b/chromium/media/audio/pulse/audio_manager_pulse.h
@@ -25,8 +25,10 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
@@ -35,6 +37,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
@@ -43,12 +46,15 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
protected:
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
bool Init();
void DestroyPulse();
+ void GetAudioDeviceNames(bool input, media::AudioDeviceNames* device_names);
+
// Callback to get the devices' info like names, used by GetInputDevices().
static void InputDevicesInfoCallback(pa_context* context,
const pa_source_info* info,
diff --git a/chromium/media/audio/shared_memory_util.cc b/chromium/media/audio/shared_memory_util.cc
index b65df03e2e1..523cdb9646c 100644
--- a/chromium/media/audio/shared_memory_util.cc
+++ b/chromium/media/audio/shared_memory_util.cc
@@ -4,6 +4,8 @@
#include "media/audio/shared_memory_util.h"
+#include <algorithm>
+
#include "base/atomicops.h"
#include "base/logging.h"
diff --git a/chromium/media/audio/test_audio_input_controller_factory.cc b/chromium/media/audio/test_audio_input_controller_factory.cc
index 64bfb9f060d..d146231a25d 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.cc
+++ b/chromium/media/audio/test_audio_input_controller_factory.cc
@@ -12,8 +12,9 @@ TestAudioInputController::TestAudioInputController(
AudioManager* audio_manager,
const AudioParameters& audio_parameters,
EventHandler* event_handler,
- SyncWriter* sync_writer)
- : AudioInputController(event_handler, sync_writer),
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor)
+ : AudioInputController(event_handler, sync_writer, user_input_monitor),
audio_parameters_(audio_parameters),
factory_(factory),
event_handler_(event_handler) {
@@ -48,10 +49,11 @@ TestAudioInputControllerFactory::~TestAudioInputControllerFactory() {
AudioInputController* TestAudioInputControllerFactory::Create(
AudioManager* audio_manager,
AudioInputController::EventHandler* event_handler,
- AudioParameters params) {
+ AudioParameters params,
+ UserInputMonitor* user_input_monitor) {
DCHECK(!controller_); // Only one test instance managed at a time.
- controller_ = new TestAudioInputController(this, audio_manager, params,
- event_handler, NULL);
+ controller_ = new TestAudioInputController(
+ this, audio_manager, params, event_handler, NULL, user_input_monitor);
return controller_;
}
diff --git a/chromium/media/audio/test_audio_input_controller_factory.h b/chromium/media/audio/test_audio_input_controller_factory.h
index 0a179473c1c..4968c013d97 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.h
+++ b/chromium/media/audio/test_audio_input_controller_factory.h
@@ -10,6 +10,7 @@
namespace media {
+class UserInputMonitor;
class TestAudioInputControllerFactory;
// TestAudioInputController and TestAudioInputControllerFactory are used for
@@ -56,7 +57,8 @@ class TestAudioInputController : public AudioInputController {
AudioManager* audio_manager,
const AudioParameters& audio_parameters,
EventHandler* event_handler,
- SyncWriter* sync_writer);
+ SyncWriter* sync_writer,
+ UserInputMonitor* user_input_monitor);
// Returns the event handler installed on the AudioInputController.
EventHandler* event_handler() const { return event_handler_; }
@@ -94,7 +96,8 @@ class TestAudioInputControllerFactory : public AudioInputController::Factory {
virtual AudioInputController* Create(
AudioManager* audio_manager,
AudioInputController::EventHandler* event_handler,
- AudioParameters params) OVERRIDE;
+ AudioParameters params,
+ UserInputMonitor* user_input_monitor) OVERRIDE;
void SetDelegateForTests(TestAudioInputControllerDelegate* delegate);
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index e0819439109..a174ea2ea0d 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -103,9 +103,8 @@ bool WASAPIAudioInputStream::Open() {
// Verify that the selected audio endpoint supports the specified format
// set during construction.
- if (!DesiredFormatIsSupported()) {
+ if (!DesiredFormatIsSupported())
return false;
- }
// Initialize the audio stream between the client and the device using
// shared mode and a lowest possible glitch-free latency.
@@ -141,6 +140,9 @@ void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
HRESULT hr = audio_client_->Start();
DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";
+ if (SUCCEEDED(hr) && audio_render_client_for_loopback_)
+ hr = audio_render_client_for_loopback_->Start();
+
started_ = SUCCEEDED(hr);
}
@@ -276,6 +278,10 @@ HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
// Retrieve the default capture audio endpoint.
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
endpoint_device.Receive());
+ } else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device.Receive());
} else {
// Retrieve a capture endpoint device that is specified by an endpoint
// device-identification string.
@@ -454,42 +460,44 @@ void WASAPIAudioInputStream::HandleError(HRESULT err) {
HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // Retrieve the IMMDevice by using the specified role or the specified
- // unique endpoint device-identification string.
- // TODO(henrika): possibly add support for the eCommunications as well.
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture,
- eConsole,
- endpoint_device_.Receive());
- } else {
- // Retrieve a capture endpoint device that is specified by an endpoint
- // device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
- }
+ HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ if (FAILED(hr))
+ return hr;
- if (FAILED(hr))
- return hr;
+ // Retrieve the IMMDevice by using the specified role or the specified
+ // unique endpoint device-identification string.
+ // TODO(henrika): possibly add support for the eCommunications as well.
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device_.Receive());
+ } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device_.Receive());
+ } else {
+ // Retrieve a capture endpoint device that is specified by an endpoint
+ // device-identification string.
+ hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
+ }
- // Verify that the audio endpoint device is active, i.e., the audio
- // adapter that connects to the endpoint device is present and enabled.
- DWORD state = DEVICE_STATE_DISABLED;
- hr = endpoint_device_->GetState(&state);
- if (SUCCEEDED(hr)) {
- if (!(state & DEVICE_STATE_ACTIVE)) {
- DLOG(ERROR) << "Selected capture device is not active.";
- hr = E_ACCESSDENIED;
- }
- }
+ if (FAILED(hr))
+ return hr;
+
+ // Verify that the audio endpoint device is active, i.e., the audio
+ // adapter that connects to the endpoint device is present and enabled.
+ DWORD state = DEVICE_STATE_DISABLED;
+ hr = endpoint_device_->GetState(&state);
+ if (FAILED(hr))
+ return hr;
+
+ if (!(state & DEVICE_STATE_ACTIVE)) {
+ DLOG(ERROR) << "Selected capture device is not active.";
+ hr = E_ACCESSDENIED;
}
return hr;
@@ -565,16 +573,25 @@ bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
}
HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
+ DWORD flags;
+  // Use event-driven mode only for regular input devices. For loopback the
+  // EVENTCALLBACK flag is specified when initializing
+ // |audio_render_client_for_loopback_|.
+ if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
+ } else {
+ flags =
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
+ }
+
// Initialize the audio stream between the client and the device.
- // We connect indirectly through the audio engine by using shared mode
- // and WASAPI is initialized in an event driven mode.
+ // We connect indirectly through the audio engine by using shared mode.
// Note that, |hnsBufferDuration| is set of 0, which ensures that the
// buffer is never smaller than the minimum buffer size needed to ensure
// that glitches do not occur between the periodic processing passes.
// This setting should lead to lowest possible latency.
HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
- AUDCLNT_STREAMFLAGS_NOPERSIST,
+ flags,
0, // hnsBufferDuration
0,
&format_,
@@ -590,6 +607,7 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
if (FAILED(hr))
return hr;
+
DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
<< " [frames]";
@@ -618,9 +636,41 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
}
#endif
- // Set the event handle that the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
+ // Set the event handle that the audio engine will signal each time a buffer
+ // becomes ready to be processed by the client.
+ //
+ // In loopback case the capture device doesn't receive any events, so we
+ // need to create a separate playback client to get notifications. According
+ // to MSDN:
+ //
+ // A pull-mode capture client does not receive any events when a stream is
+ // initialized with event-driven buffering and is loopback-enabled. To
+ // work around this, initialize a render stream in event-driven mode. Each
+ // time the client receives an event for the render stream, it must signal
+ // the capture client to run the capture thread that reads the next set of
+ // samples from the capture endpoint buffer.
+ //
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx
+ if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ hr = endpoint_device_->Activate(
+ __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL,
+ audio_render_client_for_loopback_.ReceiveVoid());
+ if (FAILED(hr))
+ return hr;
+
+ hr = audio_render_client_for_loopback_->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST,
+ 0, 0, &format_, NULL);
+ if (FAILED(hr))
+ return hr;
+
+ hr = audio_render_client_for_loopback_->SetEventHandle(
+ audio_samples_ready_event_.Get());
+ } else {
+ hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
+ }
+
if (FAILED(hr))
return hr;
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index 4f9c7fb6c88..99e1604925a 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -184,6 +184,14 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// an audio stream between an audio application and the audio engine.
base::win::ScopedComPtr<IAudioClient> audio_client_;
+ // Loopback IAudioClient doesn't support event-driven mode, so a separate
+ // IAudioClient is needed to receive notifications when data is available in
+ // the buffer. For loopback input |audio_client_| is used to receive data,
+ // while |audio_render_client_for_loopback_| is used to get notifications
+ // when a new buffer is ready. See comment in InitializeAudioEngine() for
+ // details.
+ base::win::ScopedComPtr<IAudioClient> audio_render_client_for_loopback_;
+
// The IAudioCaptureClient interface enables a client to read input data
// from a capture endpoint buffer.
base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 40990ec13d4..11fad25d3fe 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -39,12 +39,53 @@ ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
+ const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes, double volume));
MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
+class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+ FakeAudioInputCallback()
+ : closed_(false),
+ error_(false),
+ data_event_(false, false) {
+ }
+
+ const std::vector<uint8>& received_data() const { return received_data_; }
+ bool closed() const { return closed_; }
+ bool error() const { return error_; }
+
+ // Waits until OnData() is called on another thread.
+ void WaitForData() {
+ data_event_.Wait();
+ }
+
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes, double volume) OVERRIDE {
+ received_data_.insert(received_data_.end(), src, src + size);
+ data_event_.Signal();
+ }
+
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {
+ closed_ = true;
+ }
+
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {
+ error_ = true;
+ }
+
+ private:
+ std::vector<uint8> received_data_;
+ base::WaitableEvent data_event_;
+ bool closed_;
+ bool error_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioInputCallback);
+};
+
// This audio sink implementation should be used for manual tests only since
// the recorded data is stored on a raw binary data file.
class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
@@ -188,6 +229,39 @@ static AudioInputStream* CreateDefaultAudioInputStream(
return ais;
}
+class ScopedAudioInputStream {
+ public:
+ explicit ScopedAudioInputStream(AudioInputStream* stream)
+ : stream_(stream) {}
+
+ ~ScopedAudioInputStream() {
+ if (stream_)
+ stream_->Close();
+ }
+
+ void Close() {
+ if (stream_)
+ stream_->Close();
+ stream_ = NULL;
+ }
+
+ AudioInputStream* operator->() {
+ return stream_;
+ }
+
+ AudioInputStream* get() const { return stream_; }
+
+ void Reset(AudioInputStream* new_stream) {
+ Close();
+ stream_ = new_stream;
+ }
+
+ private:
+ AudioInputStream* stream_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAudioInputStream);
+};
+
// Verify that we can retrieve the current hardware/mixing sample rate
// for all available input devices.
TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
@@ -217,8 +291,9 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
if (!CanRunAudioTests(audio_manager.get()))
return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- ais->Close();
+ ScopedAudioInputStream ais(
+ CreateDefaultAudioInputStream(audio_manager.get()));
+ ais.Close();
}
// Test Open(), Close() calling sequence.
@@ -226,9 +301,10 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
if (!CanRunAudioTests(audio_manager.get()))
return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+ ScopedAudioInputStream ais(
+ CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
- ais->Close();
+ ais.Close();
}
// Test Open(), Start(), Close() calling sequence.
@@ -236,13 +312,14 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
if (!CanRunAudioTests(audio_manager.get()))
return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+ ScopedAudioInputStream ais(
+ CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
}
// Test Open(), Start(), Stop(), Close() calling sequence.
@@ -250,14 +327,15 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
if (!CanRunAudioTests(audio_manager.get()))
return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+ ScopedAudioInputStream ais(
+ CreateDefaultAudioInputStream(audio_manager.get()));
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
}
// Test some additional calling sequences.
@@ -265,8 +343,10 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
if (!CanRunAudioTests(audio_manager.get()))
return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- WASAPIAudioInputStream* wais = static_cast<WASAPIAudioInputStream*>(ais);
+ ScopedAudioInputStream ais(
+ CreateDefaultAudioInputStream(audio_manager.get()));
+ WASAPIAudioInputStream* wais =
+ static_cast<WASAPIAudioInputStream*>(ais.get());
// Open(), Open() should fail the second time.
EXPECT_TRUE(ais->Open());
@@ -286,9 +366,9 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
ais->Stop();
EXPECT_FALSE(wais->started());
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
}
TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
@@ -304,7 +384,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// Create default WASAPI input stream which records in stereo using
// the shared mixing rate. The default buffer size is 10ms.
AudioInputStreamWrapper aisw(audio_manager.get());
- AudioInputStream* ais = aisw.Create();
+ ScopedAudioInputStream ais(aisw.Create());
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
@@ -317,7 +397,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// All should contain valid packets of the same size and a valid delay
// estimate.
EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
@@ -327,49 +407,78 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// Store current packet size (to be used in the subsequent tests).
int samples_per_packet_10ms = aisw.samples_per_packet();
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
// 20 ms packet size.
count = 0;
- ais = aisw.Create(2 * samples_per_packet_10ms);
+ ais.Reset(aisw.Create(2 * samples_per_packet_10ms));
EXPECT_TRUE(ais->Open());
bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
(aisw.bits_per_sample() / 8);
EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
// 5 ms packet size.
count = 0;
- ais = aisw.Create(samples_per_packet_10ms / 2);
+ ais.Reset(aisw.Create(samples_per_packet_10ms / 2));
EXPECT_TRUE(ais->Open());
bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
(aisw.bits_per_sample() / 8);
EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
+ EXPECT_CALL(sink, OnClose(ais.get()))
.Times(1);
- ais->Close();
+ ais.Close();
+}
+
+// Test that we can capture loopback stream.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!audio_manager->HasAudioOutputDevices() || !CoreAudioUtil::IsSupported())
+ return;
+
+ AudioParameters params = audio_manager->GetInputStreamParameters(
+ AudioManagerBase::kLoopbackInputDeviceId);
+
+ AudioParameters output_params =
+ audio_manager->GetOutputStreamParameters(std::string());
+ EXPECT_EQ(params.sample_rate(), output_params.sample_rate());
+ EXPECT_EQ(params.channel_layout(), output_params.channel_layout());
+
+ ScopedAudioInputStream stream(audio_manager->MakeAudioInputStream(
+ params, AudioManagerBase::kLoopbackInputDeviceId));
+ ASSERT_TRUE(stream->Open());
+ FakeAudioInputCallback sink;
+ stream->Start(&sink);
+ ASSERT_FALSE(sink.error());
+
+ sink.WaitForData();
+ stream.Close();
+
+ EXPECT_FALSE(sink.received_data().empty());
+ EXPECT_TRUE(sink.closed());
+ EXPECT_FALSE(sink.error());
}
// This test is intended for manual tests and should only be enabled
@@ -389,7 +498,7 @@ TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
const char* file_name = "out_stereo_10sec.pcm";
AudioInputStreamWrapper aisw(audio_manager.get());
- AudioInputStream* ais = aisw.Create();
+ ScopedAudioInputStream ais(aisw.Create());
EXPECT_TRUE(ais->Open());
LOG(INFO) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
@@ -399,7 +508,7 @@ TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
base::PlatformThread::Sleep(TestTimeouts::action_timeout());
ais->Stop();
LOG(INFO) << ">> Recording has stopped.";
- ais->Close();
+ ais.Close();
}
} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index b2098b02094..c889c03ef2c 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -111,14 +111,26 @@ ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
}
// static
-int WASAPIAudioOutputStream::HardwareSampleRate() {
+int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
WAVEFORMATPCMEX format;
- return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
- eRender, eConsole, &format)) ?
- static_cast<int>(format.Format.nSamplesPerSec) : 0;
+ ScopedComPtr<IAudioClient> client;
+ if (device_id.empty()) {
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ } else {
+ ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
+ if (!device)
+ return 0;
+ client = CoreAudioUtil::CreateClient(device);
+ }
+
+ if (!client || FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
+ return 0;
+
+ return static_cast<int>(format.Format.nSamplesPerSec);
}
WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
+ const std::string& device_id,
const AudioParameters& params,
ERole device_role)
: creating_thread_id_(base::PlatformThread::CurrentId()),
@@ -127,6 +139,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
audio_parameters_are_valid_(false),
volume_(1.0),
endpoint_buffer_size_frames_(0),
+ device_id_(device_id),
device_role_(device_role),
share_mode_(GetShareMode()),
num_written_frames_(0),
@@ -142,12 +155,16 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
// channel count are excluded) to the preferred (native) audio parameters.
// Open() will fail if this is not the case.
AudioParameters preferred_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
- eRender, device_role, &preferred_params);
+ HRESULT hr = device_id_.empty() ?
+ CoreAudioUtil::GetPreferredAudioParameters(eRender, device_role,
+ &preferred_params) :
+ CoreAudioUtil::GetPreferredAudioParameters(device_id_,
+ &preferred_params);
audio_parameters_are_valid_ = SUCCEEDED(hr) &&
CompareAudioParametersNoBitDepthOrChannels(params, preferred_params);
LOG_IF(WARNING, !audio_parameters_are_valid_)
- << "Input and preferred parameters are not identical.";
+ << "Input and preferred parameters are not identical. "
+ << "Device id: " << device_id_;
}
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
@@ -203,7 +220,6 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
-
// Audio parameters must be identical to the preferred set of parameters
// if shared mode (default) is utilized.
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
@@ -214,8 +230,16 @@ bool WASAPIAudioOutputStream::Open() {
}
// Create an IAudioClient interface for the default rendering IMMDevice.
- ScopedComPtr<IAudioClient> audio_client =
- CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+ ScopedComPtr<IAudioClient> audio_client;
+ if (device_id_.empty()) {
+ audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+ } else {
+ ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
+ DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
+ if (device)
+ audio_client = CoreAudioUtil::CreateClient(device);
+ }
+
if (!audio_client)
return false;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index b0e990bb1a4..7884d8840f7 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -122,6 +122,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// The ctor takes all the usual parameters, plus |manager| which is the
// the audio manager who is creating this object.
WASAPIAudioOutputStream(AudioManagerWin* manager,
+ const std::string& device_id,
const AudioParameters& params,
ERole device_role);
@@ -149,8 +150,9 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
static ChannelLayout HardwareChannelLayout();
// Retrieves the sample rate the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- static int HardwareSampleRate();
+ // processing/mixing of shared-mode streams. To fetch the settings for the
+ // default device, pass an empty string as the |device_id|.
+ static int HardwareSampleRate(const std::string& device_id);
// Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-mode is used
// as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
@@ -219,6 +221,9 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
+ // The target device id or an empty string for the default device.
+ const std::string device_id_;
+
// Defines the role that the system has assigned to an audio endpoint device.
ERole device_role_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 8c3e366c0cc..1f78facf91d 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -234,7 +234,7 @@ class AudioOutputStreamWrapper {
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
AudioParameters(format_, channel_layout_, sample_rate_,
bits_per_sample_, samples_per_packet_),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(aos);
return aos;
}
@@ -268,7 +268,7 @@ TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Default device intended for games, system notification sounds,
// and voice commands.
int fs = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate());
+ WASAPIAudioOutputStream::HardwareSampleRate(std::string()));
EXPECT_GE(fs, 0);
}
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index a753e554cb4..0352e6677d2 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -240,27 +240,44 @@ void AudioManagerWin::ShowAudioInputSettings() {
base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
}
-void AudioManagerWin::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
+void AudioManagerWin::GetAudioDeviceNamesImpl(
+ bool input,
+ AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
DCHECK(enumeration_type() != kUninitializedEnumeration);
// Enumerate all active audio-endpoint capture devices.
if (enumeration_type() == kWaveEnumeration) {
// Utilize the Wave API for Windows XP.
- media::GetInputDeviceNamesWinXP(device_names);
+ if (input)
+ GetInputDeviceNamesWinXP(device_names);
+ else
+ GetOutputDeviceNamesWinXP(device_names);
} else {
// Utilize the MMDevice API (part of Core Audio) for Vista and higher.
- media::GetInputDeviceNamesWin(device_names);
+ if (input)
+ GetInputDeviceNamesWin(device_names);
+ else
+ GetOutputDeviceNamesWin(device_names);
}
// Always add default device parameters as first element.
if (!device_names->empty()) {
- media::AudioDeviceName name;
+ AudioDeviceName name;
name.device_name = AudioManagerBase::kDefaultDeviceName;
name.unique_id = AudioManagerBase::kDefaultDeviceId;
device_names->push_front(name);
}
}
+void AudioManagerWin::GetAudioInputDeviceNames(AudioDeviceNames* device_names) {
+ GetAudioDeviceNamesImpl(true, device_names);
+}
+
+void AudioManagerWin::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ GetAudioDeviceNamesImpl(false, device_names);
+}
+
AudioParameters AudioManagerWin::GetInputStreamParameters(
const std::string& device_id) {
int sample_rate = 48000;
@@ -280,6 +297,16 @@ AudioParameters AudioManagerWin::GetInputStreamParameters(
sample_rate, 16, kFallbackBufferSize);
}
+std::string AudioManagerWin::GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) {
+ if (!CoreAudioUtil::IsSupported()) {
+ NOTIMPLEMENTED()
+ << "GetAssociatedOutputDeviceID is not supported on this OS";
+ return std::string();
+ }
+ return CoreAudioUtil::GetMatchingOutputDeviceID(input_device_id);
+}
+
// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR
// mode.
// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
@@ -291,7 +318,7 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
return new PCMWaveOutAudioOutputStream(this,
params,
- media::NumberOfWaveOutBuffers(),
+ NumberOfWaveOutBuffers(),
WAVE_MAPPER);
}
@@ -301,25 +328,31 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
if (params.channels() > kWinMaxChannels)
return NULL;
if (!CoreAudioUtil::IsSupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
+ DLOG_IF(ERROR, !device_id.empty())
+ << "Opening by device id not supported by PCMWaveOutAudioOutputStream";
DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
return new PCMWaveOutAudioOutputStream(
- this, params, media::NumberOfWaveOutBuffers(), WAVE_MAPPER);
+ this, params, NumberOfWaveOutBuffers(), WAVE_MAPPER);
}
- // TODO(crogers): support more than stereo input.
+ // TODO(rtoy): support more than stereo input.
if (params.input_channels() > 0) {
DVLOG(1) << "WASAPIUnifiedStream is created.";
+ DLOG_IF(ERROR, !device_id.empty())
+ << "Opening by device id not supported by WASAPIUnifiedStream";
return new WASAPIUnifiedStream(this, params, input_device_id);
}
- return new WASAPIAudioOutputStream(this, params, eConsole);
+ return new WASAPIAudioOutputStream(this, device_id, params, eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -347,55 +380,68 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
return stream;
}
+std::string AudioManagerWin::GetDefaultOutputDeviceID() {
+ if (!CoreAudioUtil::IsSupported())
+ return std::string();
+ return CoreAudioUtil::GetDefaultOutputDeviceID();
+}
+
AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) {
+ const bool core_audio_supported = CoreAudioUtil::IsSupported();
+ DLOG_IF(ERROR, !core_audio_supported && !output_device_id.empty())
+ << "CoreAudio is required to open non-default devices.";
+
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = 48000;
int buffer_size = kFallbackBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
- bool use_input_params = !CoreAudioUtil::IsSupported();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
- // TODO(crogers): tune these values for best possible WebAudio performance.
- // WebRTC works well at 48kHz and a buffer size of 480 samples will be used
- // for this case. Note that exclusive mode is experimental.
- // This sample rate will be combined with a buffer size of 256 samples,
- // which corresponds to an output delay of ~5.33ms.
- sample_rate = 48000;
- buffer_size = 256;
- if (input_params.IsValid())
- channel_layout = input_params.channel_layout();
- } else if (!use_input_params) {
- // Hardware sample-rate on Windows can be configured, so we must query.
- // TODO(henrika): improve possibility to specify an audio endpoint.
- // Use the default device (same as for Wave) for now to be compatible.
- int hw_sample_rate = WASAPIAudioOutputStream::HardwareSampleRate();
-
- AudioParameters params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
- &params);
- int hw_buffer_size =
- FAILED(hr) ? kFallbackBufferSize : params.frames_per_buffer();
- channel_layout = WASAPIAudioOutputStream::HardwareChannelLayout();
-
- // TODO(henrika): Figure out the right thing to do here.
- if (hw_sample_rate && hw_buffer_size) {
- sample_rate = hw_sample_rate;
- buffer_size = hw_buffer_size;
+ bool use_input_params = !core_audio_supported;
+ if (core_audio_supported) {
+ if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
+ // TODO(rtoy): tune these values for best possible WebAudio
+ // performance. WebRTC works well at 48kHz and a buffer size of 480
+ // samples will be used for this case. Note that exclusive mode is
+ // experimental. This sample rate will be combined with a buffer size of
+ // 256 samples, which corresponds to an output delay of ~5.33ms.
+ sample_rate = 48000;
+ buffer_size = 256;
+ if (input_params.IsValid())
+ channel_layout = input_params.channel_layout();
} else {
- use_input_params = true;
+ AudioParameters params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+ output_device_id.empty() ?
+ GetDefaultOutputDeviceID() : output_device_id,
+ &params);
+ if (SUCCEEDED(hr)) {
+ bits_per_sample = params.bits_per_sample();
+ buffer_size = params.frames_per_buffer();
+ channel_layout = params.channel_layout();
+ sample_rate = params.sample_rate();
+ } else {
+ use_input_params = true;
+ }
}
}
if (input_params.IsValid()) {
- if (cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) &&
- CoreAudioUtil::IsSupported()) {
+ if (core_audio_supported &&
+ cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts)) {
// Check if it is possible to open up at the specified input channel
// layout but avoid checking if the specified layout is the same as the
// hardware (preferred) layout. We do this extra check to avoid the
// CoreAudioUtil::IsChannelLayoutSupported() overhead in most cases.
if (input_params.channel_layout() != channel_layout) {
+ // TODO(henrika): Use |output_device_id| here.
+ // Internally, IsChannelLayoutSupported does many of the operations
+ // that have already been done such as opening up a client and fetching
+ // the WAVEFORMATPCMEX format. Ideally we should only do that once and
+ // do it for the requested device. Then here, we can check the layout
+ // from the data we already hold.
if (CoreAudioUtil::IsChannelLayoutSupported(
eRender, eConsole, input_params.channel_layout())) {
// Open up using the same channel layout as the source if it is
@@ -413,10 +459,10 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
// equal to the input values, AudioOutputResampler will skip resampling
// and bit per sample differences (since the input parameters will match
// the output parameters).
- sample_rate = input_params.sample_rate();
bits_per_sample = input_params.bits_per_sample();
- channel_layout = input_params.channel_layout();
buffer_size = input_params.frames_per_buffer();
+ channel_layout = input_params.channel_layout();
+ sample_rate = input_params.sample_rate();
}
}
@@ -435,7 +481,7 @@ AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
std::string xp_device_id = device_id;
if (device_id != AudioManagerBase::kDefaultDeviceId &&
enumeration_type_ == kMMDeviceEnumeration) {
- xp_device_id = media::ConvertToWinXPDeviceId(device_id);
+ xp_device_id = ConvertToWinXPInputDeviceId(device_id);
if (xp_device_id.empty()) {
DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID "
<< device_id;
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
index 65cc73bbd6e..86e22badc5f 100644
--- a/chromium/media/audio/win/audio_manager_win.h
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -25,26 +25,33 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
virtual bool HasAudioInputDevices() OVERRIDE;
virtual string16 GetAudioInputDeviceModel() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
+ virtual void GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
+ virtual void GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) OVERRIDE;
virtual AudioParameters GetInputStreamParameters(
const std::string& device_id) OVERRIDE;
+ virtual std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) OVERRIDE;
// Implementation of AudioManagerBase.
virtual AudioOutputStream* MakeLinearOutputStream(
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
+ const std::string& device_id,
const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
protected:
virtual ~AudioManagerWin();
virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
const AudioParameters& input_params) OVERRIDE;
private:
@@ -55,7 +62,7 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
};
// Allow unit test to modify the utilized enumeration API.
- friend class AudioInputDeviceTest;
+ friend class AudioManagerTest;
EnumerationType enumeration_type_;
EnumerationType enumeration_type() { return enumeration_type_; }
@@ -76,6 +83,8 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
void CreateDeviceListener();
void DestroyDeviceListener();
+ void GetAudioDeviceNamesImpl(bool input, AudioDeviceNames* device_names);
+
// Listen for output device changes.
scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
index 4e13d84f3d6..7ce146b0ab4 100644
--- a/chromium/media/audio/win/audio_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -185,7 +185,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
oas->Close();
}
@@ -201,29 +201,29 @@ TEST(WinAudioTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
media::limits::kMaxSamplesPerPacket + 1),
- std::string()));
+ std::string(), std::string()));
}
// Test that it can be opened and closed.
@@ -237,7 +237,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
oas->Close();
@@ -254,7 +254,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 1024 * 1024 * 1024),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(NULL == oas);
if (oas)
oas->Close();
@@ -273,7 +273,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
16000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
TestSourceLaggy test_laggy(2, 90);
EXPECT_TRUE(oas->Open());
@@ -302,7 +302,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -333,7 +333,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -362,7 +362,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate / 2, 16,
samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
@@ -402,7 +402,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
kSampleRate, 16, kSamples100ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
@@ -439,7 +439,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -486,7 +486,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, sample_rate,
16, n * samples_10_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200, sample_rate);
@@ -520,7 +520,7 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
NiceMock<MockAudioSource> source;
@@ -680,7 +680,7 @@ TEST(WinAudioTest, SyncSocketBasic) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
ASSERT_TRUE(oas->Open());
diff --git a/chromium/media/audio/win/audio_unified_win_unittest.cc b/chromium/media/audio/win/audio_unified_win_unittest.cc
index cfd17aea14f..011c36348b5 100644
--- a/chromium/media/audio/win/audio_unified_win_unittest.cc
+++ b/chromium/media/audio/win/audio_unified_win_unittest.cc
@@ -196,13 +196,13 @@ class AudioUnifiedStreamWrapper {
// Creates an AudioOutputStream object using default parameters.
WASAPIUnifiedStream* Create() {
- return static_cast<WASAPIUnifiedStream*> (CreateOutputStream());
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
}
// Creates an AudioOutputStream object using default parameters but a
// specified input device.
WASAPIUnifiedStream* Create(const std::string device_id) {
- return static_cast<WASAPIUnifiedStream*> (CreateOutputStream(device_id));
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream(device_id));
}
AudioParameters::Format format() const { return params_.format(); }
@@ -223,20 +223,21 @@ class AudioUnifiedStreamWrapper {
CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
AudioDeviceName name;
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
- const std::string& device_id = name.unique_id;
- EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, device_id));
+ const std::string& input_device_id = name.unique_id;
+ EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole,
+ input_device_id));
// Create the unified audio I/O stream using the default input device.
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- device_id);
+ "", input_device_id);
EXPECT_TRUE(aos);
return aos;
}
- AudioOutputStream* CreateOutputStream(const std::string& device_id) {
+ AudioOutputStream* CreateOutputStream(const std::string& input_device_id) {
// Create the unified audio I/O stream using the specified input device.
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- device_id);
+ "", input_device_id);
EXPECT_TRUE(aos);
return aos;
}
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index 392184b7a01..4adfdda090a 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -4,8 +4,9 @@
#include "media/audio/win/core_audio_util_win.h"
-#include <Audioclient.h>
-#include <Functiondiscoverykeys_devpkey.h>
+#include <audioclient.h>
+#include <devicetopology.h>
+#include <functiondiscoverykeys_devpkey.h>
#include "base/command_line.h"
#include "base/logging.h"
@@ -122,7 +123,7 @@ static std::ostream& operator<<(std::ostream& os,
return os;
}
-bool LoadAudiosesDll() {
+static bool LoadAudiosesDll() {
static const wchar_t* const kAudiosesDLL =
L"%WINDIR%\\system32\\audioses.dll";
@@ -131,7 +132,7 @@ bool LoadAudiosesDll() {
return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}
-bool CanCreateDeviceEnumerator() {
+static bool CanCreateDeviceEnumerator() {
ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
@@ -143,6 +144,14 @@ bool CanCreateDeviceEnumerator() {
return SUCCEEDED(hr);
}
+static std::string GetDeviceID(IMMDevice* device) {
+ ScopedCoMem<WCHAR> device_id_com;
+ std::string device_id;
+ if (SUCCEEDED(device->GetId(&device_id_com)))
+ WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
+ return device_id;
+}
+
bool CoreAudioUtil::IsSupported() {
// It is possible to force usage of WaveXxx APIs by using a command line flag.
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -262,6 +271,12 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
return endpoint_device;
}
+std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
+ return device ? GetDeviceID(device) : std::string();
+}
+
ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
const std::string& device_id) {
DCHECK(IsSupported());
@@ -288,17 +303,14 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
// Retrieve unique name of endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
AudioDeviceName device_name;
- ScopedCoMem<WCHAR> endpoint_device_id;
- HRESULT hr = device->GetId(&endpoint_device_id);
- if (FAILED(hr))
- return hr;
- WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
- &device_name.unique_id);
+ device_name.unique_id = GetDeviceID(device);
+ if (device_name.unique_id.empty())
+ return E_FAIL;
// Retrieve user-friendly name of endpoint device.
// Example: "Microphone (Realtek High Definition Audio)".
ScopedComPtr<IPropertyStore> properties;
- hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
if (FAILED(hr))
return hr;
base::win::ScopedPropVariant friendly_name;
@@ -317,6 +329,88 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
return hr;
}
+std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
+ IMMDeviceEnumerator* enumerator) {
+ DCHECK(IsSupported());
+
+ // Fetching the controller device id could be as simple as fetching the value
+ // of the "{B3F8FA53-0004-438E-9003-51A46E139BFC},2" property in the property
+ // store of the |device|, but that key isn't defined in any header and
+ // according to MS should not be relied upon.
+ // So, instead, we go deeper, look at the device topology and fetch the
+ // PKEY_Device_InstanceId of the associated physical audio device.
+ ScopedComPtr<IDeviceTopology> topology;
+ ScopedComPtr<IConnector> connector;
+ ScopedCoMem<WCHAR> filter_id;
+ if (FAILED(device->Activate(__uuidof(IDeviceTopology), CLSCTX_ALL, NULL,
+ topology.ReceiveVoid()) ||
+ // For our purposes checking the first connected device should be enough
+ // and if there are cases where there are more than one device connected
+ // we're not sure how to handle that anyway. So we pass 0.
+ FAILED(topology->GetConnector(0, connector.Receive())) ||
+ FAILED(connector->GetDeviceIdConnectedTo(&filter_id)))) {
+ DLOG(ERROR) << "Failed to get the device identifier of the audio device";
+ return std::string();
+ }
+
+ // Now look at the properties of the connected device node and fetch the
+ // instance id (PKEY_Device_InstanceId) of the device node that uniquely
+ // identifies the controller.
+ ScopedComPtr<IMMDevice> device_node;
+ ScopedComPtr<IPropertyStore> properties;
+ base::win::ScopedPropVariant instance_id;
+ if (FAILED(enumerator->GetDevice(filter_id, device_node.Receive())) ||
+ FAILED(device_node->OpenPropertyStore(STGM_READ, properties.Receive())) ||
+ FAILED(properties->GetValue(PKEY_Device_InstanceId,
+ instance_id.Receive())) ||
+ instance_id.get().vt != VT_LPWSTR) {
+ DLOG(ERROR) << "Failed to get instance id of the audio device node";
+ return std::string();
+ }
+
+ std::string controller_id;
+ WideToUTF8(instance_id.get().pwszVal,
+ wcslen(instance_id.get().pwszVal),
+ &controller_id);
+
+ return controller_id;
+}
+
+std::string CoreAudioUtil::GetMatchingOutputDeviceID(
+ const std::string& input_device_id) {
+ ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
+ if (!input_device)
+ return std::string();
+
+ // See if we can get id of the associated controller.
+ ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
+ std::string controller_id(GetAudioControllerID(input_device, enumerator));
+ if (controller_id.empty())
+ return std::string();
+
+ // Now enumerate the available (and active) output devices and see if any of
+ // them is associated with the same controller.
+ ScopedComPtr<IMMDeviceCollection> collection;
+ enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
+ collection.Receive());
+ if (!collection)
+ return std::string();
+
+ UINT count = 0;
+ collection->GetCount(&count);
+ ScopedComPtr<IMMDevice> output_device;
+ for (UINT i = 0; i < count; ++i) {
+ collection->Item(i, output_device.Receive());
+ std::string output_controller_id(CoreAudioUtil::GetAudioControllerID(
+ output_device, enumerator));
+ if (output_controller_id == controller_id)
+ break;
+ output_device = NULL;
+ }
+
+ return output_device ? GetDeviceID(output_device) : std::string();
+}
+
std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
DCHECK(IsSupported());
ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
@@ -339,16 +433,8 @@ bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
if (!device)
return false;
- ScopedCoMem<WCHAR> default_device_id;
- HRESULT hr = device->GetId(&default_device_id);
- if (FAILED(hr))
- return false;
-
- std::string str_default;
- WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
- if (device_id.compare(str_default) != 0)
- return false;
- return true;
+ std::string str_default(GetDeviceID(device));
+ return device_id.compare(str_default) == 0;
}
EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index 3b2734570d0..cdf6dfb11df 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -59,6 +59,10 @@ class MEDIA_EXPORT CoreAudioUtil {
static ScopedComPtr<IMMDevice> CreateDefaultDevice(
EDataFlow data_flow, ERole role);
+ // Returns the device id of the default output device or an empty string
+ // if no such device exists or if the default device has been disabled.
+ static std::string GetDefaultOutputDeviceID();
+
// Creates an endpoint device that is specified by a unique endpoint device-
// identification string.
static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
@@ -68,6 +72,24 @@ class MEDIA_EXPORT CoreAudioUtil {
// "Microphone (Realtek High Definition Audio)".
static HRESULT GetDeviceName(IMMDevice* device, AudioDeviceName* name);
+ // Returns the device ID/path of the controller (a.k.a. physical device that
+ // |device| is connected to. This ID will be the same for all devices from
+ // the same controller so it is useful for doing things like determining
+ // whether a set of output and input devices belong to the same controller.
+ // The device enumerator is required as well as the device itself since
+ // looking at the device topology is required and we need to open up
+ // associated devices to determine the controller id.
+ // If the ID could not be determined for some reason, an empty string is
+ // returned.
+ static std::string GetAudioControllerID(IMMDevice* device,
+ IMMDeviceEnumerator* enumerator);
+
+ // Accepts an id of an input device and finds a matching output device id.
+ // If the associated hardware does not have an audio output device (e.g.
+ // a webcam with a mic), an empty string is returned.
+ static std::string GetMatchingOutputDeviceID(
+ const std::string& input_device_id);
+
// Gets the user-friendly name of the endpoint device which is represented
// by a unique id in |device_id|.
static std::string GetFriendlyName(const std::string& device_id);
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index 6d3e1fcf093..abef8682020 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/waitable_event.h"
+#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -140,6 +142,33 @@ TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceName) {
}
}
+TEST_F(CoreAudioUtilWinTest, GetAudioControllerID) {
+ if (!CanRunAudioTest())
+ return;
+
+ ScopedComPtr<IMMDeviceEnumerator> enumerator(
+ CoreAudioUtil::CreateDeviceEnumerator());
+ ASSERT_TRUE(enumerator);
+
+ // Enumerate all active input and output devices and fetch the ID of
+ // the associated device.
+ EDataFlow flows[] = { eRender , eCapture };
+ for (int i = 0; i < arraysize(flows); ++i) {
+ ScopedComPtr<IMMDeviceCollection> collection;
+ ASSERT_TRUE(SUCCEEDED(enumerator->EnumAudioEndpoints(flows[i],
+ DEVICE_STATE_ACTIVE, collection.Receive())));
+ UINT count = 0;
+ collection->GetCount(&count);
+ for (UINT j = 0; j < count; ++j) {
+ ScopedComPtr<IMMDevice> device;
+ collection->Item(j, device.Receive());
+ std::string controller_id(CoreAudioUtil::GetAudioControllerID(
+ device, enumerator));
+ EXPECT_FALSE(controller_id.empty());
+ }
+ }
+}
+
TEST_F(CoreAudioUtilWinTest, GetFriendlyName) {
if (!CanRunAudioTest())
return;
@@ -448,6 +477,46 @@ TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
}
-//
+// This test can only succeed on a machine that has audio hardware
+// that has both input and output devices. Currently this is the case
+// with our test bots and the CanRunAudioTest() method should make sure
+// that the test won't run in unsupported environments, but be warned.
+TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
+ if (!CanRunAudioTest())
+ return;
+
+ bool found_a_pair = false;
+
+ ScopedComPtr<IMMDeviceEnumerator> enumerator(
+ CoreAudioUtil::CreateDeviceEnumerator());
+ ASSERT_TRUE(enumerator);
+
+ // Enumerate all active input and output devices and fetch the ID of
+ // the associated device.
+ ScopedComPtr<IMMDeviceCollection> collection;
+ ASSERT_TRUE(SUCCEEDED(enumerator->EnumAudioEndpoints(eCapture,
+ DEVICE_STATE_ACTIVE, collection.Receive())));
+ UINT count = 0;
+ collection->GetCount(&count);
+ for (UINT i = 0; i < count && !found_a_pair; ++i) {
+ ScopedComPtr<IMMDevice> device;
+ collection->Item(i, device.Receive());
+ base::win::ScopedCoMem<WCHAR> wide_id;
+ device->GetId(&wide_id);
+ std::string id;
+ WideToUTF8(wide_id, wcslen(wide_id), &id);
+ found_a_pair = !CoreAudioUtil::GetMatchingOutputDeviceID(id).empty();
+ }
+
+ EXPECT_TRUE(found_a_pair);
+}
+
+TEST_F(CoreAudioUtilWinTest, GetDefaultOutputDeviceID) {
+ if (!CanRunAudioTest())
+ return;
+
+ std::string default_device_id(CoreAudioUtil::GetDefaultOutputDeviceID());
+ EXPECT_FALSE(default_device_id.empty());
+}
} // namespace media
diff --git a/chromium/media/audio/win/device_enumeration_win.cc b/chromium/media/audio/win/device_enumeration_win.cc
index 36ed2913ffe..aa66afb12b1 100644
--- a/chromium/media/audio/win/device_enumeration_win.cc
+++ b/chromium/media/audio/win/device_enumeration_win.cc
@@ -8,13 +8,13 @@
#include "media/audio/win/audio_manager_win.h"
+#include "base/basictypes.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_comptr.h"
#include "base/win/scoped_propvariant.h"
-using media::AudioDeviceNames;
using base::win::ScopedComPtr;
using base::win::ScopedCoMem;
@@ -25,7 +25,8 @@ using base::win::ScopedCoMem;
namespace media {
-bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
+static bool GetDeviceNamesWinImpl(EDataFlow data_flow,
+ AudioDeviceNames* device_names) {
// It is assumed that this method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
@@ -37,24 +38,24 @@ bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
return false;
}
- // Generate a collection of active audio capture endpoint devices.
+ // Generate a collection of active audio endpoint devices.
// This method will succeed even if all devices are disabled.
ScopedComPtr<IMMDeviceCollection> collection;
- hr = enumerator->EnumAudioEndpoints(eCapture,
+ hr = enumerator->EnumAudioEndpoints(data_flow,
DEVICE_STATE_ACTIVE,
collection.Receive());
if (FAILED(hr))
return false;
- // Retrieve the number of active capture devices.
+ // Retrieve the number of active devices.
UINT number_of_active_devices = 0;
collection->GetCount(&number_of_active_devices);
if (number_of_active_devices == 0)
return true;
- media::AudioDeviceName device;
+ AudioDeviceName device;
- // Loop over all active capture devices and add friendly name and
+ // Loop over all active devices and add friendly name and
// unique ID to the |device_names| list.
for (UINT i = 0; i < number_of_active_devices; ++i) {
// Retrieve unique name of endpoint device.
@@ -92,14 +93,22 @@ bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
return true;
}
-bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+// The waveform API is weird in that it has completely separate but
+// almost identical functions and structs for input devices vs. output
+// devices. We deal with this by implementing the logic as a templated
+// function that takes the functions and struct type to use as
+// template parameters.
+template <UINT (__stdcall *NumDevsFunc)(),
+ typename CAPSSTRUCT,
+ MMRESULT (__stdcall *DevCapsFunc)(UINT_PTR, CAPSSTRUCT*, UINT)>
+static bool GetDeviceNamesWinXPImpl(AudioDeviceNames* device_names) {
// Retrieve the number of active waveform input devices.
- UINT number_of_active_devices = waveInGetNumDevs();
+ UINT number_of_active_devices = NumDevsFunc();
if (number_of_active_devices == 0)
return true;
- media::AudioDeviceName device;
- WAVEINCAPS capabilities;
+ AudioDeviceName device;
+ CAPSSTRUCT capabilities;
MMRESULT err = MMSYSERR_NOERROR;
// Loop over all active capture devices and add friendly name and
@@ -108,7 +117,7 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
// there is no safe method to retrieve a unique device name on XP.
for (UINT i = 0; i < number_of_active_devices; ++i) {
// Retrieve the capabilities of the specified waveform-audio input device.
- err = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
+ err = DevCapsFunc(i, &capabilities, sizeof(capabilities));
if (err != MMSYSERR_NOERROR)
continue;
@@ -118,7 +127,7 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
device.device_name = WideToUTF8(capabilities.szPname);
// Store the "unique" name (we use same as friendly name on Windows XP).
- device.unique_id = WideToUTF8(capabilities.szPname);
+ device.unique_id = device.device_name;
// Add combination of user-friendly and unique name to the output list.
device_names->push_back(device);
@@ -127,7 +136,25 @@ bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
return true;
}
-std::string ConvertToWinXPDeviceId(const std::string& device_id) {
+bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinImpl(eCapture, device_names);
+}
+
+bool GetOutputDeviceNamesWin(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinImpl(eRender, device_names);
+}
+
+bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinXPImpl<
+ waveInGetNumDevs, WAVEINCAPSW, waveInGetDevCapsW>(device_names);
+}
+
+bool GetOutputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+ return GetDeviceNamesWinXPImpl<
+ waveOutGetNumDevs, WAVEOUTCAPSW, waveOutGetDevCapsW>(device_names);
+}
+
+std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
UINT number_of_active_devices = waveInGetNumDevs();
MMRESULT result = MMSYSERR_NOERROR;
diff --git a/chromium/media/audio/win/device_enumeration_win.h b/chromium/media/audio/win/device_enumeration_win.h
index 3d44670a6d3..e61a331842a 100644
--- a/chromium/media/audio/win/device_enumeration_win.h
+++ b/chromium/media/audio/win/device_enumeration_win.h
@@ -11,28 +11,32 @@
namespace media {
-// Returns a list of audio input device structures (name and unique device ID)
-// using the MMDevice API which is supported on Windows Vista and higher.
+// Returns a list of audio input or output device structures (name and
+// unique device ID) using the MMDevice API which is supported on
+// Windows Vista and higher.
// Example record in the output list:
// - device_name: "Microphone (Realtek High Definition Audio)".
// - unique_id: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
// This method must be called from a COM thread using MTA.
bool GetInputDeviceNamesWin(media::AudioDeviceNames* device_names);
+bool GetOutputDeviceNamesWin(media::AudioDeviceNames* device_names);
-// Returns a list of audio input device structures (name and unique device ID)
-// using the WaveIn API which is supported on Windows XP and higher.
+// Returns a list of audio input or output device structures (name and
+// unique device ID) using the WaveIn API which is supported on
+// Windows XP and higher.
// Example record in the output list:
// - device_name: "Microphone (Realtek High Defini".
// - unique_id: "Microphone (Realtek High Defini" (same as friendly name).
bool GetInputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
+bool GetOutputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
-// Converts a device ID generated by |GetInputDeviceNamesWin()| to the
+// Converts an input device ID generated by |GetInputDeviceNamesWin()| to the
// corresponding ID by |GetInputDeviceNamesWinXP()|. Returns an empty string on
// failure.
// Example input and output:
// - input ID: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
// - output ID: "Microphone (Realtek High Defini"
-std::string ConvertToWinXPDeviceId(const std::string& device_id);
+std::string ConvertToWinXPInputDeviceId(const std::string& device_id);
} // namespace media
diff --git a/chromium/media/audio/win/wavein_input_win.h b/chromium/media/audio/win/wavein_input_win.h
index 4b830e34805..df5ce4d129b 100644
--- a/chromium/media/audio/win/wavein_input_win.h
+++ b/chromium/media/audio/win/wavein_input_win.h
@@ -56,7 +56,7 @@ class PCMWaveInAudioInputStream : public AudioInputStream {
};
// Allow unit tests to query the device ID.
- friend class AudioInputDeviceTest;
+ friend class AudioManagerTest;
// Windows calls us back with the recorded audio data here. See msdn
// documentation for 'waveInProc' for details about the parameters.
diff --git a/chromium/media/base/android/audio_decoder_job.cc b/chromium/media/base/android/audio_decoder_job.cc
new file mode 100644
index 00000000000..2ac7c0389cb
--- /dev/null
+++ b/chromium/media/base/android/audio_decoder_job.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/audio_decoder_job.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/threading/thread.h"
+#include "media/base/android/media_codec_bridge.h"
+
+namespace media {
+
+class AudioDecoderThread : public base::Thread {
+ public:
+ AudioDecoderThread() : base::Thread("MediaSource_AudioDecoderThread") {
+ Start();
+ }
+};
+
+// TODO(qinmin): Check if it is tolerable to use worker pool to handle all the
+// decoding tasks so that we don't need a global thread here.
+// http://crbug.com/245750
+base::LazyInstance<AudioDecoderThread>::Leaky
+ g_audio_decoder_thread = LAZY_INSTANCE_INITIALIZER;
+
+AudioDecoderJob* AudioDecoderJob::Create(
+ const AudioCodec audio_codec,
+ int sample_rate,
+ int channel_count,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ jobject media_crypto,
+ const base::Closure& request_data_cb) {
+ scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
+ if (codec && codec->Start(audio_codec, sample_rate, channel_count, extra_data,
+ extra_data_size, true, media_crypto)) {
+ return new AudioDecoderJob(codec.Pass(), request_data_cb);
+ }
+
+ LOG(ERROR) << "Failed to create AudioDecoderJob.";
+ return NULL;
+}
+
+AudioDecoderJob::AudioDecoderJob(
+ scoped_ptr<AudioCodecBridge> audio_codec_bridge,
+ const base::Closure& request_data_cb)
+ : MediaDecoderJob(g_audio_decoder_thread.Pointer()->message_loop_proxy(),
+ audio_codec_bridge.get(), request_data_cb),
+ audio_codec_bridge_(audio_codec_bridge.Pass()) {
+}
+
+AudioDecoderJob::~AudioDecoderJob() {
+}
+
+void AudioDecoderJob::SetVolume(double volume) {
+ audio_codec_bridge_->SetVolume(volume);
+}
+
+void AudioDecoderJob::ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback,
+ MediaCodecStatus status) {
+ audio_codec_bridge_->PlayOutputBuffer(outputBufferIndex, size);
+
+ if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
+ audio_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, false);
+
+ callback.Run(status, presentation_timestamp, size);
+}
+
+bool AudioDecoderJob::ComputeTimeToRender() const {
+ return false;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/audio_decoder_job.h b/chromium/media/base/android/audio_decoder_job.h
new file mode 100644
index 00000000000..6ad8c28e25b
--- /dev/null
+++ b/chromium/media/base/android/audio_decoder_job.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_AUDIO_DECODER_JOB_H_
+#define MEDIA_BASE_ANDROID_AUDIO_DECODER_JOB_H_
+
+#include <jni.h>
+
+#include "media/base/android/media_decoder_job.h"
+
+namespace media {
+
+class AudioCodecBridge;
+
+// Class for managing audio decoding jobs.
+class AudioDecoderJob : public MediaDecoderJob {
+ public:
+ virtual ~AudioDecoderJob();
+
+ // Creates a new AudioDecoderJob instance for decoding audio.
+ // |audio_codec| - The audio format the object needs to decode.
+ // |sample_rate| - The sample rate of the decoded output.
+ // |channel_count| - The number of channels in the decoded output.
+ // |extra_data|, |extra_data_size| - Extra data buffer needed for initializing
+ // the decoder.
+ // |media_crypto| - Handle to a Java object that handles the encryption for
+ // the audio data.
+ // |request_data_cb| - Callback used to request more data for the decoder.
+ static AudioDecoderJob* Create(
+ const AudioCodec audio_codec, int sample_rate, int channel_count,
+ const uint8* extra_data, size_t extra_data_size, jobject media_crypto,
+ const base::Closure& request_data_cb);
+
+ void SetVolume(double volume);
+
+ private:
+ AudioDecoderJob(scoped_ptr<AudioCodecBridge> audio_decoder_bridge,
+ const base::Closure& request_data_cb);
+
+ // MediaDecoderJob implementation.
+ virtual void ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback,
+ MediaCodecStatus status) OVERRIDE;
+
+ virtual bool ComputeTimeToRender() const OVERRIDE;
+
+ scoped_ptr<AudioCodecBridge> audio_codec_bridge_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_AUDIO_DECODER_JOB_H_
diff --git a/chromium/media/base/android/demuxer_android.h b/chromium/media/base/android/demuxer_android.h
new file mode 100644
index 00000000000..33902db728d
--- /dev/null
+++ b/chromium/media/base/android/demuxer_android.h
@@ -0,0 +1,77 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
+#define MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class DemuxerAndroidClient;
+struct DemuxerConfigs;
+struct DemuxerData;
+
+// Defines a demuxer with ID-based asynchronous operations.
+//
+// TODO(scherkus): Remove |demuxer_client_id| and Add/RemoveDemuxerClient().
+// It's required in the interim as the Android Media Source implementation uses
+// the MediaPlayerAndroid interface and associated IPC messages.
+class MEDIA_EXPORT DemuxerAndroid {
+ public:
+ // Associates |client| with the demuxer using |demuxer_client_id| as the
+ // identifier. Must be called prior to calling any other methods.
+ virtual void AddDemuxerClient(int demuxer_client_id,
+ DemuxerAndroidClient* client) = 0;
+
+ // Removes the association created by AddClient(). Must be called when the
+ // client no longer wants to receive updates.
+ virtual void RemoveDemuxerClient(int demuxer_client_id) = 0;
+
+ // Called to request the current audio/video decoder configurations.
+ virtual void RequestDemuxerConfigs(int demuxer_client_id) = 0;
+
+ // Called to request additiona data from the demuxer.
+ virtual void RequestDemuxerData(int demuxer_client_id,
+ media::DemuxerStream::Type type) = 0;
+
+ // Called to request the demuxer to seek to a particular media time.
+ virtual void RequestDemuxerSeek(int demuxer_client_id,
+ base::TimeDelta time_to_seek,
+ unsigned seek_request_id) = 0;
+
+ protected:
+ virtual ~DemuxerAndroid() {}
+};
+
+// Defines the client callback interface.
+class MEDIA_EXPORT DemuxerAndroidClient {
+ public:
+ // Called in response to RequestDemuxerConfigs() and also when the demuxer has
+ // initialized.
+ //
+ // TODO(scherkus): Perhaps clients should be required to call
+ // RequestDemuxerConfigs() to initialize themselves instead of the demuxer
+ // calling this method without being prompted.
+ virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) = 0;
+
+ // Called in response to RequestDemuxerData().
+ virtual void OnDemuxerDataAvailable(const DemuxerData& params) = 0;
+
+ // Called in response to RequestDemuxerSeek().
+ virtual void OnDemuxerSeeked(unsigned seek_request_id) = 0;
+
+ // Called whenever the demuxer has detected a duration change.
+ virtual void OnDemuxerDurationChanged(base::TimeDelta duration) = 0;
+
+ protected:
+ virtual ~DemuxerAndroidClient() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_DEMUXER_ANDROID_H_
diff --git a/chromium/media/base/android/demuxer_stream_player_params.cc b/chromium/media/base/android/demuxer_stream_player_params.cc
index 827be119565..d5021a968c7 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.cc
+++ b/chromium/media/base/android/demuxer_stream_player_params.cc
@@ -6,8 +6,7 @@
namespace media {
-MediaPlayerHostMsg_DemuxerReady_Params::
- MediaPlayerHostMsg_DemuxerReady_Params()
+DemuxerConfigs::DemuxerConfigs()
: audio_codec(kUnknownAudioCodec),
audio_channels(0),
audio_sampling_rate(0),
@@ -16,18 +15,14 @@ MediaPlayerHostMsg_DemuxerReady_Params::
is_video_encrypted(false),
duration_ms(0) {}
-MediaPlayerHostMsg_DemuxerReady_Params::
- ~MediaPlayerHostMsg_DemuxerReady_Params() {}
+DemuxerConfigs::~DemuxerConfigs() {}
AccessUnit::AccessUnit() : end_of_stream(false) {}
AccessUnit::~AccessUnit() {}
-MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params()
- : type(DemuxerStream::UNKNOWN) {}
+DemuxerData::DemuxerData() : type(DemuxerStream::UNKNOWN) {}
-MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
- ~MediaPlayerHostMsg_ReadFromDemuxerAck_Params() {}
+DemuxerData::~DemuxerData() {}
} // namespace media
diff --git a/chromium/media/base/android/demuxer_stream_player_params.h b/chromium/media/base/android/demuxer_stream_player_params.h
index a9fb0520ae5..4a3a04d10e0 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.h
+++ b/chromium/media/base/android/demuxer_stream_player_params.h
@@ -5,7 +5,9 @@
#ifndef MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
#define MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
+#if defined(GOOGLE_TV)
#include <string>
+#endif // defined(GOOGLE_TV)
#include <vector>
#include "media/base/audio_decoder_config.h"
@@ -17,9 +19,9 @@
namespace media {
-struct MEDIA_EXPORT MediaPlayerHostMsg_DemuxerReady_Params {
- MediaPlayerHostMsg_DemuxerReady_Params();
- ~MediaPlayerHostMsg_DemuxerReady_Params();
+struct MEDIA_EXPORT DemuxerConfigs {
+ DemuxerConfigs();
+ ~DemuxerConfigs();
AudioCodec audio_codec;
int audio_channels;
@@ -33,7 +35,10 @@ struct MEDIA_EXPORT MediaPlayerHostMsg_DemuxerReady_Params {
std::vector<uint8> video_extra_data;
int duration_ms;
+
+#if defined(GOOGLE_TV)
std::string key_system;
+#endif // defined(GOOGLE_TV)
};
struct MEDIA_EXPORT AccessUnit {
@@ -50,9 +55,9 @@ struct MEDIA_EXPORT AccessUnit {
std::vector<media::SubsampleEntry> subsamples;
};
-struct MEDIA_EXPORT MediaPlayerHostMsg_ReadFromDemuxerAck_Params {
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- ~MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+struct MEDIA_EXPORT DemuxerData {
+ DemuxerData();
+ ~DemuxerData();
DemuxerStream::Type type;
std::vector<AccessUnit> access_units;
diff --git a/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java b/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
deleted file mode 100644
index a7afdae59c6..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.content.BroadcastReceiver;
-import android.content.Context;
-import android.content.Intent;
-import android.content.IntentFilter;
-import android.content.pm.PackageManager;
-import android.media.AudioFormat;
-import android.media.AudioManager;
-import android.media.AudioRecord;
-import android.media.AudioTrack;
-import android.os.Build;
-import android.util.Log;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-@JNINamespace("media")
-class AudioManagerAndroid {
- private static final String TAG = "AudioManagerAndroid";
-
- // Most of Google lead devices use 44.1K as the default sampling rate, 44.1K
- // is also widely used on other android devices.
- private static final int DEFAULT_SAMPLING_RATE = 44100;
- // Randomly picked up frame size which is close to return value on N4.
- // Return this default value when
- // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
- private static final int DEFAULT_FRAME_PER_BUFFER = 256;
-
- private final AudioManager mAudioManager;
- private final Context mContext;
-
- private BroadcastReceiver mReceiver;
- private boolean mOriginalSpeakerStatus;
-
- @CalledByNative
- public void setMode(int mode) {
- try {
- mAudioManager.setMode(mode);
- if (mode == AudioManager.MODE_IN_COMMUNICATION) {
- mAudioManager.setSpeakerphoneOn(true);
- }
- } catch (SecurityException e) {
- Log.e(TAG, "setMode exception: " + e.getMessage());
- logDeviceInfo();
- }
- }
-
- @CalledByNative
- private static AudioManagerAndroid createAudioManagerAndroid(Context context) {
- return new AudioManagerAndroid(context);
- }
-
- private AudioManagerAndroid(Context context) {
- mContext = context;
- mAudioManager = (AudioManager)mContext.getSystemService(Context.AUDIO_SERVICE);
- }
-
- @CalledByNative
- public void registerHeadsetReceiver() {
- if (mReceiver != null) {
- return;
- }
-
- mOriginalSpeakerStatus = mAudioManager.isSpeakerphoneOn();
- IntentFilter filter = new IntentFilter(Intent.ACTION_HEADSET_PLUG);
-
- mReceiver = new BroadcastReceiver() {
- @Override
- public void onReceive(Context context, Intent intent) {
- if (Intent.ACTION_HEADSET_PLUG.equals(intent.getAction())) {
- try {
- mAudioManager.setSpeakerphoneOn(
- intent.getIntExtra("state", 0) == 0);
- } catch (SecurityException e) {
- Log.e(TAG, "setMode exception: " + e.getMessage());
- logDeviceInfo();
- }
- }
- }
- };
- mContext.registerReceiver(mReceiver, filter);
- }
-
- @CalledByNative
- public void unregisterHeadsetReceiver() {
- mContext.unregisterReceiver(mReceiver);
- mReceiver = null;
- mAudioManager.setSpeakerphoneOn(mOriginalSpeakerStatus);
- }
-
- private void logDeviceInfo() {
- Log.i(TAG, "Manufacturer:" + Build.MANUFACTURER +
- " Board: " + Build.BOARD + " Device: " + Build.DEVICE +
- " Model: " + Build.MODEL + " PRODUCT: " + Build.PRODUCT);
- }
-
- @CalledByNative
- private int getNativeOutputSampleRate() {
- if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
- String sampleRateString = mAudioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- return (sampleRateString == null ?
- DEFAULT_SAMPLING_RATE : Integer.parseInt(sampleRateString));
- } else {
- return DEFAULT_SAMPLING_RATE;
- }
- }
-
- /**
- * Returns the minimum frame size required for audio input.
- *
- * @param sampleRate sampling rate
- * @param channels number of channels
- */
- @CalledByNative
- private static int getMinInputFrameSize(int sampleRate, int channels) {
- int channelConfig;
- if (channels == 1) {
- channelConfig = AudioFormat.CHANNEL_IN_MONO;
- } else if (channels == 2) {
- channelConfig = AudioFormat.CHANNEL_IN_STEREO;
- } else {
- return -1;
- }
- return AudioRecord.getMinBufferSize(
- sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT) / 2 / channels;
- }
-
- /**
- * Returns the minimum frame size required for audio output.
- *
- * @param sampleRate sampling rate
- * @param channels number of channels
- */
- @CalledByNative
- private static int getMinOutputFrameSize(int sampleRate, int channels) {
- int channelConfig;
- if (channels == 1) {
- channelConfig = AudioFormat.CHANNEL_OUT_MONO;
- } else if (channels == 2) {
- channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
- } else {
- return -1;
- }
- return AudioTrack.getMinBufferSize(
- sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT) / 2 / channels;
- }
-
- @CalledByNative
- private boolean isAudioLowLatencySupported() {
- return mContext.getPackageManager().hasSystemFeature(
- PackageManager.FEATURE_AUDIO_LOW_LATENCY);
- }
-
- @CalledByNative
- private int getAudioLowLatencyOutputFrameSize() {
- String framesPerBuffer =
- mAudioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
- return (framesPerBuffer == null ?
- DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer));
- }
-
-}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
deleted file mode 100644
index 9cce93dd7c4..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.media.AudioFormat;
-import android.media.AudioManager;
-import android.media.AudioTrack;
-import android.media.MediaCodec;
-import android.media.MediaCrypto;
-import android.media.MediaFormat;
-import android.view.Surface;
-import android.util.Log;
-
-import java.nio.ByteBuffer;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-/**
- * A wrapper of the MediaCodec class to facilitate exception capturing and
- * audio rendering.
- */
-@JNINamespace("media")
-class MediaCodecBridge {
-
- private static final String TAG = "MediaCodecBridge";
-
- // Error code for MediaCodecBridge. Keep this value in sync with
- // INFO_MEDIA_CODEC_ERROR in media_codec_bridge.h.
- private static final int MEDIA_CODEC_OK = 0;
- private static final int MEDIA_CODEC_ERROR = -1000;
-
- // After a flush(), dequeueOutputBuffer() can often produce empty presentation timestamps
- // for several frames. As a result, the player may find that the time does not increase
- // after decoding a frame. To detect this, we check whether the presentation timestamp from
- // dequeueOutputBuffer() is larger than input_timestamp - MAX_PRESENTATION_TIMESTAMP_SHIFT_US
- // after a flush. And we set the presentation timestamp from dequeueOutputBuffer() to be
- // non-decreasing for the remaining frames.
- private static final long MAX_PRESENTATION_TIMESTAMP_SHIFT_US = 100000;
-
- private ByteBuffer[] mInputBuffers;
- private ByteBuffer[] mOutputBuffers;
-
- private MediaCodec mMediaCodec;
- private AudioTrack mAudioTrack;
- private boolean mFlushed;
- private long mLastPresentationTimeUs;
-
- private static class DequeueOutputResult {
- private final int mIndex;
- private final int mFlags;
- private final int mOffset;
- private final long mPresentationTimeMicroseconds;
- private final int mNumBytes;
-
- private DequeueOutputResult(int index, int flags, int offset,
- long presentationTimeMicroseconds, int numBytes) {
- mIndex = index;
- mFlags = flags;
- mOffset = offset;
- mPresentationTimeMicroseconds = presentationTimeMicroseconds;
- mNumBytes = numBytes;
- }
-
- @CalledByNative("DequeueOutputResult")
- private int index() { return mIndex; }
-
- @CalledByNative("DequeueOutputResult")
- private int flags() { return mFlags; }
-
- @CalledByNative("DequeueOutputResult")
- private int offset() { return mOffset; }
-
- @CalledByNative("DequeueOutputResult")
- private long presentationTimeMicroseconds() { return mPresentationTimeMicroseconds; }
-
- @CalledByNative("DequeueOutputResult")
- private int numBytes() { return mNumBytes; }
- }
-
- private MediaCodecBridge(String mime) {
- mMediaCodec = MediaCodec.createDecoderByType(mime);
- mLastPresentationTimeUs = 0;
- mFlushed = true;
- }
-
- @CalledByNative
- private static MediaCodecBridge create(String mime) {
- return new MediaCodecBridge(mime);
- }
-
- @CalledByNative
- private void release() {
- mMediaCodec.release();
- if (mAudioTrack != null) {
- mAudioTrack.release();
- }
- }
-
- @CalledByNative
- private void start() {
- mMediaCodec.start();
- mInputBuffers = mMediaCodec.getInputBuffers();
- }
-
- @CalledByNative
- private int dequeueInputBuffer(long timeoutUs) {
- try {
- return mMediaCodec.dequeueInputBuffer(timeoutUs);
- } catch(Exception e) {
- Log.e(TAG, "Cannot dequeue Input buffer " + e.toString());
- }
- return MEDIA_CODEC_ERROR;
- }
-
- @CalledByNative
- private int flush() {
- try {
- mFlushed = true;
- if (mAudioTrack != null) {
- mAudioTrack.flush();
- }
- mMediaCodec.flush();
- } catch(IllegalStateException e) {
- Log.e(TAG, "Failed to flush MediaCodec " + e.toString());
- return MEDIA_CODEC_ERROR;
- }
- return MEDIA_CODEC_OK;
- }
-
- @CalledByNative
- private void stop() {
- mMediaCodec.stop();
- if (mAudioTrack != null) {
- mAudioTrack.pause();
- }
- }
-
- @CalledByNative
- private int getOutputHeight() {
- return mMediaCodec.getOutputFormat().getInteger(MediaFormat.KEY_HEIGHT);
- }
-
- @CalledByNative
- private int getOutputWidth() {
- return mMediaCodec.getOutputFormat().getInteger(MediaFormat.KEY_WIDTH);
- }
-
- @CalledByNative
- private ByteBuffer getInputBuffer(int index) {
- return mInputBuffers[index];
- }
-
- @CalledByNative
- private ByteBuffer getOutputBuffer(int index) {
- return mOutputBuffers[index];
- }
-
- @CalledByNative
- private void queueInputBuffer(
- int index, int offset, int size, long presentationTimeUs, int flags) {
- resetLastPresentationTimeIfNeeded(presentationTimeUs);
- try {
- mMediaCodec.queueInputBuffer(index, offset, size, presentationTimeUs, flags);
- } catch(IllegalStateException e) {
- Log.e(TAG, "Failed to queue input buffer " + e.toString());
- }
- }
-
- @CalledByNative
- private void queueSecureInputBuffer(
- int index, int offset, byte[] iv, byte[] keyId, int[] numBytesOfClearData,
- int[] numBytesOfEncryptedData, int numSubSamples, long presentationTimeUs) {
- resetLastPresentationTimeIfNeeded(presentationTimeUs);
- try {
- MediaCodec.CryptoInfo cryptoInfo = new MediaCodec.CryptoInfo();
- cryptoInfo.set(numSubSamples, numBytesOfClearData, numBytesOfEncryptedData,
- keyId, iv, MediaCodec.CRYPTO_MODE_AES_CTR);
- mMediaCodec.queueSecureInputBuffer(index, offset, cryptoInfo, presentationTimeUs, 0);
- } catch(IllegalStateException e) {
- Log.e(TAG, "Failed to queue secure input buffer " + e.toString());
- }
- }
-
- @CalledByNative
- private void releaseOutputBuffer(int index, boolean render) {
- mMediaCodec.releaseOutputBuffer(index, render);
- }
-
- @CalledByNative
- private void getOutputBuffers() {
- mOutputBuffers = mMediaCodec.getOutputBuffers();
- }
-
- @CalledByNative
- private DequeueOutputResult dequeueOutputBuffer(long timeoutUs) {
- MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
- int index = MEDIA_CODEC_ERROR;
- try {
- index = mMediaCodec.dequeueOutputBuffer(info, timeoutUs);
- if (info.presentationTimeUs < mLastPresentationTimeUs) {
- // TODO(qinmin): return a special code through DequeueOutputResult
- // to notify the native code the the frame has a wrong presentation
- // timestamp and should be skipped.
- info.presentationTimeUs = mLastPresentationTimeUs;
- }
- mLastPresentationTimeUs = info.presentationTimeUs;
- } catch (IllegalStateException e) {
- Log.e(TAG, "Cannot dequeue output buffer " + e.toString());
- }
- return new DequeueOutputResult(
- index, info.flags, info.offset, info.presentationTimeUs, info.size);
- }
-
- @CalledByNative
- private boolean configureVideo(MediaFormat format, Surface surface, MediaCrypto crypto,
- int flags) {
- try {
- mMediaCodec.configure(format, surface, crypto, flags);
- return true;
- } catch (IllegalStateException e) {
- Log.e(TAG, "Cannot configure the video codec " + e.toString());
- }
- return false;
- }
-
- @CalledByNative
- private static MediaFormat createAudioFormat(String mime, int SampleRate, int ChannelCount) {
- return MediaFormat.createAudioFormat(mime, SampleRate, ChannelCount);
- }
-
- @CalledByNative
- private static MediaFormat createVideoFormat(String mime, int width, int height) {
- return MediaFormat.createVideoFormat(mime, width, height);
- }
-
- @CalledByNative
- private static void setCodecSpecificData(MediaFormat format, int index, byte[] bytes) {
- String name = null;
- if (index == 0) {
- name = "csd-0";
- } else if (index == 1) {
- name = "csd-1";
- }
- if (name != null) {
- format.setByteBuffer(name, ByteBuffer.wrap(bytes));
- }
- }
-
- @CalledByNative
- private static void setFrameHasADTSHeader(MediaFormat format) {
- format.setInteger(MediaFormat.KEY_IS_ADTS, 1);
- }
-
- @CalledByNative
- private boolean configureAudio(MediaFormat format, MediaCrypto crypto, int flags,
- boolean playAudio) {
- try {
- mMediaCodec.configure(format, null, crypto, flags);
- if (playAudio) {
- int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
- int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
- int channelConfig = (channelCount == 1) ? AudioFormat.CHANNEL_OUT_MONO :
- AudioFormat.CHANNEL_OUT_STEREO;
- // Using 16bit PCM for output. Keep this value in sync with
- // kBytesPerAudioOutputSample in media_codec_bridge.cc.
- int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig,
- AudioFormat.ENCODING_PCM_16BIT);
- mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig,
- AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
- }
- return true;
- } catch (IllegalStateException e) {
- Log.e(TAG, "Cannot configure the audio codec " + e.toString());
- }
- return false;
- }
-
- @CalledByNative
- private void playOutputBuffer(byte[] buf) {
- if (mAudioTrack != null) {
- if (AudioTrack.PLAYSTATE_PLAYING != mAudioTrack.getPlayState()) {
- mAudioTrack.play();
- }
- int size = mAudioTrack.write(buf, 0, buf.length);
- if (buf.length != size) {
- Log.i(TAG, "Failed to send all data to audio output, expected size: " +
- buf.length + ", actual size: " + size);
- }
- }
- }
-
- @CalledByNative
- private void setVolume(double volume) {
- if (mAudioTrack != null) {
- mAudioTrack.setStereoVolume((float) volume, (float) volume);
- }
- }
-
- private void resetLastPresentationTimeIfNeeded(long presentationTimeUs) {
- if (mFlushed) {
- mLastPresentationTimeUs =
- Math.max(presentationTimeUs - MAX_PRESENTATION_TIMESTAMP_SHIFT_US, 0);
- mFlushed = false;
- }
- }
-}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
deleted file mode 100644
index 4b0a1aa6d1a..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.content.Context;
-import android.media.MediaPlayer;
-import android.net.Uri;
-import android.text.TextUtils;
-import android.util.Log;
-import android.view.Surface;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.Map;
-
-// A wrapper around android.media.MediaPlayer that allows the native code to use it.
-// See media/base/android/media_player_bridge.cc for the corresponding native code.
-@JNINamespace("media")
-public class MediaPlayerBridge {
-
- private static final String TAG = "MediaPlayerBridge";
-
- // Local player to forward this to. We don't initialize it here since the subclass might not
- // want it.
- private MediaPlayer mPlayer;
-
- @CalledByNative
- private static MediaPlayerBridge create() {
- return new MediaPlayerBridge();
- }
-
- protected MediaPlayer getLocalPlayer() {
- if (mPlayer == null) {
- mPlayer = new MediaPlayer();
- }
- return mPlayer;
- }
-
- @CalledByNative
- protected void setSurface(Surface surface) {
- getLocalPlayer().setSurface(surface);
- }
-
- @CalledByNative
- protected void prepareAsync() throws IllegalStateException {
- getLocalPlayer().prepareAsync();
- }
-
- @CalledByNative
- protected boolean isPlaying() {
- return getLocalPlayer().isPlaying();
- }
-
- @CalledByNative
- protected int getVideoWidth() {
- return getLocalPlayer().getVideoWidth();
- }
-
- @CalledByNative
- protected int getVideoHeight() {
- return getLocalPlayer().getVideoHeight();
- }
-
- @CalledByNative
- protected int getCurrentPosition() {
- return getLocalPlayer().getCurrentPosition();
- }
-
- @CalledByNative
- protected int getDuration() {
- return getLocalPlayer().getDuration();
- }
-
- @CalledByNative
- protected void release() {
- getLocalPlayer().release();
- }
-
- @CalledByNative
- protected void setVolume(double volume) {
- getLocalPlayer().setVolume((float) volume, (float) volume);
- }
-
- @CalledByNative
- protected void start() {
- getLocalPlayer().start();
- }
-
- @CalledByNative
- protected void pause() {
- getLocalPlayer().pause();
- }
-
- @CalledByNative
- protected void seekTo(int msec) throws IllegalStateException {
- getLocalPlayer().seekTo(msec);
- }
-
- @CalledByNative
- protected boolean setDataSource(
- Context context, String url, String cookies, boolean hideUrlLog) {
- Uri uri = Uri.parse(url);
- HashMap<String, String> headersMap = new HashMap<String, String>();
- if (hideUrlLog)
- headersMap.put("x-hide-urls-from-log", "true");
- if (!TextUtils.isEmpty(cookies))
- headersMap.put("Cookie", cookies);
- try {
- getLocalPlayer().setDataSource(context, uri, headersMap);
- return true;
- } catch (Exception e) {
- return false;
- }
- }
-
- protected void setOnBufferingUpdateListener(MediaPlayer.OnBufferingUpdateListener listener) {
- getLocalPlayer().setOnBufferingUpdateListener(listener);
- }
-
- protected void setOnCompletionListener(MediaPlayer.OnCompletionListener listener) {
- getLocalPlayer().setOnCompletionListener(listener);
- }
-
- protected void setOnErrorListener(MediaPlayer.OnErrorListener listener) {
- getLocalPlayer().setOnErrorListener(listener);
- }
-
- protected void setOnPreparedListener(MediaPlayer.OnPreparedListener listener) {
- getLocalPlayer().setOnPreparedListener(listener);
- }
-
- protected void setOnSeekCompleteListener(MediaPlayer.OnSeekCompleteListener listener) {
- getLocalPlayer().setOnSeekCompleteListener(listener);
- }
-
- protected void setOnVideoSizeChangedListener(MediaPlayer.OnVideoSizeChangedListener listener) {
- getLocalPlayer().setOnVideoSizeChangedListener(listener);
- }
-
- private static class AllowedOperations {
- private final boolean mCanPause;
- private final boolean mCanSeekForward;
- private final boolean mCanSeekBackward;
-
- private AllowedOperations(boolean canPause, boolean canSeekForward,
- boolean canSeekBackward) {
- mCanPause = canPause;
- mCanSeekForward = canSeekForward;
- mCanSeekBackward = canSeekBackward;
- }
-
- @CalledByNative("AllowedOperations")
- private boolean canPause() { return mCanPause; }
-
- @CalledByNative("AllowedOperations")
- private boolean canSeekForward() { return mCanSeekForward; }
-
- @CalledByNative("AllowedOperations")
- private boolean canSeekBackward() { return mCanSeekBackward; }
- }
-
- /**
- * Returns an AllowedOperations object to show all the operations that are
- * allowed on the media player.
- */
- @CalledByNative
- private static AllowedOperations getAllowedOperations(MediaPlayer player) {
- boolean canPause = true;
- boolean canSeekForward = true;
- boolean canSeekBackward = true;
- try {
- Method getMetadata = player.getClass().getDeclaredMethod(
- "getMetadata", boolean.class, boolean.class);
- getMetadata.setAccessible(true);
- Object data = getMetadata.invoke(player, false, false);
- if (data != null) {
- Class<?> metadataClass = data.getClass();
- Method hasMethod = metadataClass.getDeclaredMethod("has", int.class);
- Method getBooleanMethod = metadataClass.getDeclaredMethod("getBoolean", int.class);
-
- int pause = (Integer) metadataClass.getField("PAUSE_AVAILABLE").get(null);
- int seekForward =
- (Integer) metadataClass.getField("SEEK_FORWARD_AVAILABLE").get(null);
- int seekBackward =
- (Integer) metadataClass.getField("SEEK_BACKWARD_AVAILABLE").get(null);
- hasMethod.setAccessible(true);
- getBooleanMethod.setAccessible(true);
- canPause = !((Boolean) hasMethod.invoke(data, pause))
- || ((Boolean) getBooleanMethod.invoke(data, pause));
- canSeekForward = !((Boolean) hasMethod.invoke(data, seekForward))
- || ((Boolean) getBooleanMethod.invoke(data, seekForward));
- canSeekBackward = !((Boolean) hasMethod.invoke(data, seekBackward))
- || ((Boolean) getBooleanMethod.invoke(data, seekBackward));
- }
- } catch (NoSuchMethodException e) {
- Log.e(TAG, "Cannot find getMetadata() method: " + e);
- } catch (InvocationTargetException e) {
- Log.e(TAG, "Cannot invoke MediaPlayer.getMetadata() method: " + e);
- } catch (IllegalAccessException e) {
- Log.e(TAG, "Cannot access metadata: " + e);
- } catch (NoSuchFieldException e) {
- Log.e(TAG, "Cannot find matching fields in Metadata class: " + e);
- }
- return new AllowedOperations(canPause, canSeekForward, canSeekBackward);
- }
-}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
deleted file mode 100644
index 3c68589844e..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.Manifest.permission;
-import android.content.Context;
-import android.content.pm.PackageManager;
-import android.media.AudioManager;
-import android.media.MediaPlayer;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-// This class implements all the listener interface for android mediaplayer.
-// Callbacks will be sent to the native class for processing.
-@JNINamespace("media")
-class MediaPlayerListener implements MediaPlayer.OnPreparedListener,
- MediaPlayer.OnCompletionListener,
- MediaPlayer.OnBufferingUpdateListener,
- MediaPlayer.OnSeekCompleteListener,
- MediaPlayer.OnVideoSizeChangedListener,
- MediaPlayer.OnErrorListener,
- AudioManager.OnAudioFocusChangeListener {
- // These values are mirrored as enums in media/base/android/media_player_bridge.h.
- // Please ensure they stay in sync.
- private static final int MEDIA_ERROR_FORMAT = 0;
- private static final int MEDIA_ERROR_DECODE = 1;
- private static final int MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 2;
- private static final int MEDIA_ERROR_INVALID_CODE = 3;
-
- // These values are copied from android media player.
- public static final int MEDIA_ERROR_MALFORMED = -1007;
- public static final int MEDIA_ERROR_TIMED_OUT = -110;
-
- // Used to determine the class instance to dispatch the native call to.
- private int mNativeMediaPlayerListener = 0;
- private final Context mContext;
-
- private MediaPlayerListener(int nativeMediaPlayerListener, Context context) {
- mNativeMediaPlayerListener = nativeMediaPlayerListener;
- mContext = context;
- }
-
- @Override
- public boolean onError(MediaPlayer mp, int what, int extra) {
- int errorType;
- switch (what) {
- case MediaPlayer.MEDIA_ERROR_UNKNOWN:
- switch (extra) {
- case MEDIA_ERROR_MALFORMED:
- errorType = MEDIA_ERROR_DECODE;
- break;
- case MEDIA_ERROR_TIMED_OUT:
- errorType = MEDIA_ERROR_INVALID_CODE;
- break;
- default:
- errorType = MEDIA_ERROR_FORMAT;
- break;
- }
- break;
- case MediaPlayer.MEDIA_ERROR_SERVER_DIED:
- errorType = MEDIA_ERROR_DECODE;
- break;
- case MediaPlayer.MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK:
- errorType = MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK;
- break;
- default:
- // There are some undocumented error codes for android media player.
- // For example, when surfaceTexture got deleted before we setVideoSuface
- // to NULL, mediaplayer will report error -38. These errors should be ignored
- // and not be treated as an error to webkit.
- errorType = MEDIA_ERROR_INVALID_CODE;
- break;
- }
- nativeOnMediaError(mNativeMediaPlayerListener, errorType);
- return true;
- }
-
- @Override
- public void onVideoSizeChanged(MediaPlayer mp, int width, int height) {
- nativeOnVideoSizeChanged(mNativeMediaPlayerListener, width, height);
- }
-
- @Override
- public void onSeekComplete(MediaPlayer mp) {
- nativeOnSeekComplete(mNativeMediaPlayerListener);
- }
-
- @Override
- public void onBufferingUpdate(MediaPlayer mp, int percent) {
- nativeOnBufferingUpdate(mNativeMediaPlayerListener, percent);
- }
-
- @Override
- public void onCompletion(MediaPlayer mp) {
- nativeOnPlaybackComplete(mNativeMediaPlayerListener);
- }
-
- @Override
- public void onPrepared(MediaPlayer mp) {
- nativeOnMediaPrepared(mNativeMediaPlayerListener);
- }
-
- @Override
- public void onAudioFocusChange(int focusChange) {
- if (focusChange == AudioManager.AUDIOFOCUS_LOSS ||
- focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT) {
- nativeOnMediaInterrupted(mNativeMediaPlayerListener);
- }
- }
-
- @CalledByNative
- public void releaseResources() {
- if (mContext != null) {
- // Unregister the wish for audio focus.
- AudioManager am = (AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE);
- if (am != null) {
- am.abandonAudioFocus(this);
- }
- }
- }
-
- @CalledByNative
- private static MediaPlayerListener create(int nativeMediaPlayerListener,
- Context context, MediaPlayerBridge mediaPlayerBridge) {
- final MediaPlayerListener listener =
- new MediaPlayerListener(nativeMediaPlayerListener, context);
- mediaPlayerBridge.setOnBufferingUpdateListener(listener);
- mediaPlayerBridge.setOnCompletionListener(listener);
- mediaPlayerBridge.setOnErrorListener(listener);
- mediaPlayerBridge.setOnPreparedListener(listener);
- mediaPlayerBridge.setOnSeekCompleteListener(listener);
- mediaPlayerBridge.setOnVideoSizeChangedListener(listener);
-
- AudioManager am = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
- am.requestAudioFocus(
- listener,
- AudioManager.STREAM_MUSIC,
-
- // Request permanent focus.
- AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK);
- return listener;
- }
-
- /**
- * See media/base/android/media_player_listener.cc for all the following functions.
- */
- private native void nativeOnMediaError(
- int nativeMediaPlayerListener,
- int errorType);
-
- private native void nativeOnVideoSizeChanged(
- int nativeMediaPlayerListener,
- int width, int height);
-
- private native void nativeOnBufferingUpdate(
- int nativeMediaPlayerListener,
- int percent);
-
- private native void nativeOnMediaPrepared(int nativeMediaPlayerListener);
-
- private native void nativeOnPlaybackComplete(int nativeMediaPlayerListener);
-
- private native void nativeOnSeekComplete(int nativeMediaPlayerListener);
-
- private native void nativeOnMediaInterrupted(int nativeMediaPlayerListener);
-}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java b/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java
deleted file mode 100644
index df9eb4dcffb..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java
+++ /dev/null
@@ -1,435 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.content.Context;
-import android.graphics.ImageFormat;
-import android.graphics.SurfaceTexture;
-import android.graphics.SurfaceTexture.OnFrameAvailableListener;
-import android.hardware.Camera;
-import android.hardware.Camera.PreviewCallback;
-import android.opengl.GLES20;
-import android.util.Log;
-import android.view.Surface;
-import android.view.WindowManager;
-
-import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.Iterator;
-import java.util.List;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-@JNINamespace("media")
-public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
- static class CaptureCapability {
- public int mWidth = 0;
- public int mHeight = 0;
- public int mDesiredFps = 0;
- }
-
- // Some devices with OS older than JELLY_BEAN don't support YV12 format correctly.
- // Some devices don't support YV12 format correctly even with JELLY_BEAN or newer OS.
- // To work around the issues on those devices, we'd have to request NV21.
- // This is a temporary hack till device manufacturers fix the problem or
- // we don't need to support those devices any more.
- private static class DeviceImageFormatHack {
- private static final String[] sBUGGY_DEVICE_LIST = {
- "SAMSUNG-SGH-I747",
- };
-
- static int getImageFormat() {
- if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.JELLY_BEAN) {
- return ImageFormat.NV21;
- }
-
- for (String buggyDevice : sBUGGY_DEVICE_LIST) {
- if (buggyDevice.contentEquals(android.os.Build.MODEL)) {
- return ImageFormat.NV21;
- }
- }
-
- return ImageFormat.YV12;
- }
- }
-
- private Camera mCamera;
- public ReentrantLock mPreviewBufferLock = new ReentrantLock();
- private int mImageFormat = ImageFormat.YV12;
- private byte[] mColorPlane = null;
- private Context mContext = null;
- // True when native code has started capture.
- private boolean mIsRunning = false;
-
- private static final int NUM_CAPTURE_BUFFERS = 3;
- private int mExpectedFrameSize = 0;
- private int mId = 0;
- // Native callback context variable.
- private int mNativeVideoCaptureDeviceAndroid = 0;
- private int[] mGlTextures = null;
- private SurfaceTexture mSurfaceTexture = null;
- private static final int GL_TEXTURE_EXTERNAL_OES = 0x8D65;
-
- private int mCameraOrientation = 0;
- private int mCameraFacing = 0;
- private int mDeviceOrientation = 0;
-
- CaptureCapability mCurrentCapability = null;
- private static final String TAG = "VideoCapture";
-
- @CalledByNative
- public static VideoCapture createVideoCapture(
- Context context, int id, int nativeVideoCaptureDeviceAndroid) {
- return new VideoCapture(context, id, nativeVideoCaptureDeviceAndroid);
- }
-
- public VideoCapture(
- Context context, int id, int nativeVideoCaptureDeviceAndroid) {
- mContext = context;
- mId = id;
- mNativeVideoCaptureDeviceAndroid = nativeVideoCaptureDeviceAndroid;
- }
-
- // Returns true on success, false otherwise.
- @CalledByNative
- public boolean allocate(int width, int height, int frameRate) {
- Log.d(TAG, "allocate: requested width=" + width +
- ", height=" + height + ", frameRate=" + frameRate);
- try {
- mCamera = Camera.open(mId);
- } catch (RuntimeException ex) {
- Log.e(TAG, "allocate:Camera.open: " + ex);
- return false;
- }
-
- try {
- Camera.CameraInfo camera_info = new Camera.CameraInfo();
- Camera.getCameraInfo(mId, camera_info);
- mCameraOrientation = camera_info.orientation;
- mCameraFacing = camera_info.facing;
- mDeviceOrientation = getDeviceOrientation();
- Log.d(TAG, "allocate: device orientation=" + mDeviceOrientation +
- ", camera orientation=" + mCameraOrientation +
- ", facing=" + mCameraFacing);
-
- Camera.Parameters parameters = mCamera.getParameters();
-
- // Calculate fps.
- List<int[]> listFpsRange = parameters.getSupportedPreviewFpsRange();
- if (listFpsRange == null || listFpsRange.size() == 0) {
- Log.e(TAG, "allocate: no fps range found");
- return false;
- }
- int frameRateInMs = frameRate * 1000;
- Iterator itFpsRange = listFpsRange.iterator();
- int[] fpsRange = (int[])itFpsRange.next();
- // Use the first range as default.
- int fpsMin = fpsRange[0];
- int fpsMax = fpsRange[1];
- int newFrameRate = (fpsMin + 999) / 1000;
- while (itFpsRange.hasNext()) {
- fpsRange = (int[])itFpsRange.next();
- if (fpsRange[0] <= frameRateInMs &&
- frameRateInMs <= fpsRange[1]) {
- fpsMin = fpsRange[0];
- fpsMax = fpsRange[1];
- newFrameRate = frameRate;
- break;
- }
- }
- frameRate = newFrameRate;
- Log.d(TAG, "allocate: fps set to " + frameRate);
-
- mCurrentCapability = new CaptureCapability();
- mCurrentCapability.mDesiredFps = frameRate;
-
- // Calculate size.
- List<Camera.Size> listCameraSize =
- parameters.getSupportedPreviewSizes();
- int minDiff = Integer.MAX_VALUE;
- int matchedWidth = width;
- int matchedHeight = height;
- Iterator itCameraSize = listCameraSize.iterator();
- while (itCameraSize.hasNext()) {
- Camera.Size size = (Camera.Size)itCameraSize.next();
- int diff = Math.abs(size.width - width) +
- Math.abs(size.height - height);
- Log.d(TAG, "allocate: support resolution (" +
- size.width + ", " + size.height + "), diff=" + diff);
- // TODO(wjia): Remove this hack (forcing width to be multiple
- // of 32) by supporting stride in video frame buffer.
- // Right now, VideoCaptureController requires compact YV12
- // (i.e., with no padding).
- if (diff < minDiff && (size.width % 32 == 0)) {
- minDiff = diff;
- matchedWidth = size.width;
- matchedHeight = size.height;
- }
- }
- if (minDiff == Integer.MAX_VALUE) {
- Log.e(TAG, "allocate: can not find a resolution whose width " +
- "is multiple of 32");
- return false;
- }
- mCurrentCapability.mWidth = matchedWidth;
- mCurrentCapability.mHeight = matchedHeight;
- Log.d(TAG, "allocate: matched width=" + matchedWidth +
- ", height=" + matchedHeight);
-
- calculateImageFormat(matchedWidth, matchedHeight);
-
- parameters.setPreviewSize(matchedWidth, matchedHeight);
- parameters.setPreviewFormat(mImageFormat);
- parameters.setPreviewFpsRange(fpsMin, fpsMax);
- mCamera.setParameters(parameters);
-
- // Set SurfaceTexture.
- mGlTextures = new int[1];
- // Generate one texture pointer and bind it as an external texture.
- GLES20.glGenTextures(1, mGlTextures, 0);
- GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, mGlTextures[0]);
- // No mip-mapping with camera source.
- GLES20.glTexParameterf(GL_TEXTURE_EXTERNAL_OES,
- GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
- GLES20.glTexParameterf(GL_TEXTURE_EXTERNAL_OES,
- GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
- // Clamp to edge is only option.
- GLES20.glTexParameteri(GL_TEXTURE_EXTERNAL_OES,
- GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
- GLES20.glTexParameteri(GL_TEXTURE_EXTERNAL_OES,
- GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
-
- mSurfaceTexture = new SurfaceTexture(mGlTextures[0]);
- mSurfaceTexture.setOnFrameAvailableListener(null);
-
- mCamera.setPreviewTexture(mSurfaceTexture);
-
- int bufSize = matchedWidth * matchedHeight *
- ImageFormat.getBitsPerPixel(mImageFormat) / 8;
- for (int i = 0; i < NUM_CAPTURE_BUFFERS; i++) {
- byte[] buffer = new byte[bufSize];
- mCamera.addCallbackBuffer(buffer);
- }
- mExpectedFrameSize = bufSize;
- } catch (IOException ex) {
- Log.e(TAG, "allocate: " + ex);
- return false;
- }
-
- return true;
- }
-
- @CalledByNative
- public int queryWidth() {
- return mCurrentCapability.mWidth;
- }
-
- @CalledByNative
- public int queryHeight() {
- return mCurrentCapability.mHeight;
- }
-
- @CalledByNative
- public int queryFrameRate() {
- return mCurrentCapability.mDesiredFps;
- }
-
- @CalledByNative
- public int startCapture() {
- if (mCamera == null) {
- Log.e(TAG, "startCapture: camera is null");
- return -1;
- }
-
- mPreviewBufferLock.lock();
- try {
- if (mIsRunning) {
- return 0;
- }
- mIsRunning = true;
- } finally {
- mPreviewBufferLock.unlock();
- }
- mCamera.setPreviewCallbackWithBuffer(this);
- mCamera.startPreview();
- return 0;
- }
-
- @CalledByNative
- public int stopCapture() {
- if (mCamera == null) {
- Log.e(TAG, "stopCapture: camera is null");
- return 0;
- }
-
- mPreviewBufferLock.lock();
- try {
- if (!mIsRunning) {
- return 0;
- }
- mIsRunning = false;
- } finally {
- mPreviewBufferLock.unlock();
- }
-
- mCamera.stopPreview();
- mCamera.setPreviewCallbackWithBuffer(null);
- return 0;
- }
-
- @CalledByNative
- public void deallocate() {
- if (mCamera == null)
- return;
-
- stopCapture();
- try {
- mCamera.setPreviewTexture(null);
- if (mGlTextures != null)
- GLES20.glDeleteTextures(1, mGlTextures, 0);
- mCurrentCapability = null;
- mCamera.release();
- mCamera = null;
- } catch (IOException ex) {
- Log.e(TAG, "deallocate: failed to deallocate camera, " + ex);
- return;
- }
- }
-
- @Override
- public void onPreviewFrame(byte[] data, Camera camera) {
- mPreviewBufferLock.lock();
- try {
- if (!mIsRunning) {
- return;
- }
- if (data.length == mExpectedFrameSize) {
- int rotation = getDeviceOrientation();
- if (rotation != mDeviceOrientation) {
- mDeviceOrientation = rotation;
- Log.d(TAG,
- "onPreviewFrame: device orientation=" +
- mDeviceOrientation + ", camera orientation=" +
- mCameraOrientation);
- }
- boolean flipVertical = false;
- boolean flipHorizontal = false;
- if (mCameraFacing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
- rotation = (mCameraOrientation + rotation) % 360;
- rotation = (360 - rotation) % 360;
- flipHorizontal = (rotation == 270 || rotation == 90);
- flipVertical = flipHorizontal;
- } else {
- rotation = (mCameraOrientation - rotation + 360) % 360;
- }
- if (mImageFormat == ImageFormat.NV21) {
- convertNV21ToYV12(data);
- }
- nativeOnFrameAvailable(mNativeVideoCaptureDeviceAndroid,
- data, mExpectedFrameSize,
- rotation, flipVertical, flipHorizontal);
- }
- } finally {
- mPreviewBufferLock.unlock();
- if (camera != null) {
- camera.addCallbackBuffer(data);
- }
- }
- }
-
- // TODO(wjia): investigate whether reading from texture could give better
- // performance and frame rate.
- @Override
- public void onFrameAvailable(SurfaceTexture surfaceTexture) { }
-
- private static class ChromiumCameraInfo {
- private final int mId;
- private final Camera.CameraInfo mCameraInfo;
-
- private ChromiumCameraInfo(int index) {
- mId = index;
- mCameraInfo = new Camera.CameraInfo();
- Camera.getCameraInfo(index, mCameraInfo);
- }
-
- @CalledByNative("ChromiumCameraInfo")
- private static int getNumberOfCameras() {
- return Camera.getNumberOfCameras();
- }
-
- @CalledByNative("ChromiumCameraInfo")
- private static ChromiumCameraInfo getAt(int index) {
- return new ChromiumCameraInfo(index);
- }
-
- @CalledByNative("ChromiumCameraInfo")
- private int getId() {
- return mId;
- }
-
- @CalledByNative("ChromiumCameraInfo")
- private String getDeviceName() {
- return "camera " + mId + ", facing " +
- (mCameraInfo.facing ==
- Camera.CameraInfo.CAMERA_FACING_FRONT ? "front" : "back");
- }
-
- @CalledByNative("ChromiumCameraInfo")
- private int getOrientation() {
- return mCameraInfo.orientation;
- }
- }
-
- private native void nativeOnFrameAvailable(
- int nativeVideoCaptureDeviceAndroid,
- byte[] data,
- int length,
- int rotation,
- boolean flipVertical,
- boolean flipHorizontal);
-
- private int getDeviceOrientation() {
- int orientation = 0;
- if (mContext != null) {
- WindowManager wm = (WindowManager)mContext.getSystemService(
- Context.WINDOW_SERVICE);
- switch(wm.getDefaultDisplay().getRotation()) {
- case Surface.ROTATION_90:
- orientation = 90;
- break;
- case Surface.ROTATION_180:
- orientation = 180;
- break;
- case Surface.ROTATION_270:
- orientation = 270;
- break;
- case Surface.ROTATION_0:
- default:
- orientation = 0;
- break;
- }
- }
- return orientation;
- }
-
- private void calculateImageFormat(int width, int height) {
- mImageFormat = DeviceImageFormatHack.getImageFormat();
- if (mImageFormat == ImageFormat.NV21) {
- mColorPlane = new byte[width * height / 4];
- }
- }
-
- private void convertNV21ToYV12(byte[] data) {
- final int ySize = mCurrentCapability.mWidth * mCurrentCapability.mHeight;
- final int uvSize = ySize / 4;
- for (int i = 0; i < uvSize; i++) {
- final int index = ySize + i * 2;
- data[ySize + i] = data[index];
- mColorPlane[i] = data[index + 1];
- }
- System.arraycopy(mColorPlane, 0, data, ySize + uvSize, uvSize);
- }
-}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java b/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
deleted file mode 100644
index 1de7e42b8d2..00000000000
--- a/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.media;
-
-import android.content.Context;
-import android.media.AudioFormat;
-import android.media.MediaCodec;
-import android.media.MediaCodec.BufferInfo;
-import android.media.MediaExtractor;
-import android.media.MediaFormat;
-import android.os.ParcelFileDescriptor;
-import android.util.Log;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-
-import org.chromium.base.CalledByNative;
-import org.chromium.base.JNINamespace;
-
-@JNINamespace("media")
-class WebAudioMediaCodecBridge {
- private static final boolean DEBUG = true;
- static final String LOG_TAG = "WebAudioMediaCodec";
- // TODO(rtoy): What is the correct timeout value for reading
- // from a file in memory?
- static final long TIMEOUT_MICROSECONDS = 500;
- @CalledByNative
- private static String CreateTempFile(Context ctx) throws java.io.IOException {
- File outputDirectory = ctx.getCacheDir();
- File outputFile = File.createTempFile("webaudio", ".dat", outputDirectory);
- return outputFile.getAbsolutePath();
- }
-
- @CalledByNative
- private static boolean decodeAudioFile(Context ctx,
- int nativeMediaCodecBridge,
- int inputFD,
- long dataSize) {
-
- if (dataSize < 0 || dataSize > 0x7fffffff)
- return false;
-
- MediaExtractor extractor = new MediaExtractor();
-
- ParcelFileDescriptor encodedFD;
- encodedFD = ParcelFileDescriptor.adoptFd(inputFD);
- try {
- extractor.setDataSource(encodedFD.getFileDescriptor(), 0, dataSize);
- } catch (Exception e) {
- e.printStackTrace();
- encodedFD.detachFd();
- return false;
- }
-
- if (extractor.getTrackCount() <= 0) {
- encodedFD.detachFd();
- return false;
- }
-
- MediaFormat format = extractor.getTrackFormat(0);
-
- // Number of channels specified in the file
- int inputChannelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
-
- // Number of channels the decoder will provide. (Not
- // necessarily the same as inputChannelCount. See
- // crbug.com/266006.)
- int outputChannelCount = inputChannelCount;
-
- int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
- String mime = format.getString(MediaFormat.KEY_MIME);
-
- long durationMicroseconds = 0;
- if (format.containsKey(MediaFormat.KEY_DURATION)) {
- try {
- durationMicroseconds = format.getLong(MediaFormat.KEY_DURATION);
- } catch (Exception e) {
- Log.d(LOG_TAG, "Cannot get duration");
- }
- }
-
- if (DEBUG) {
- Log.d(LOG_TAG, "Tracks: " + extractor.getTrackCount()
- + " Rate: " + sampleRate
- + " Channels: " + inputChannelCount
- + " Mime: " + mime
- + " Duration: " + durationMicroseconds + " microsec");
- }
-
- nativeInitializeDestination(nativeMediaCodecBridge,
- inputChannelCount,
- sampleRate,
- durationMicroseconds);
-
- // Create decoder
- MediaCodec codec = MediaCodec.createDecoderByType(mime);
- codec.configure(format, null /* surface */, null /* crypto */, 0 /* flags */);
- codec.start();
-
- ByteBuffer[] codecInputBuffers = codec.getInputBuffers();
- ByteBuffer[] codecOutputBuffers = codec.getOutputBuffers();
-
- // A track must be selected and will be used to read samples.
- extractor.selectTrack(0);
-
- boolean sawInputEOS = false;
- boolean sawOutputEOS = false;
-
- // Keep processing until the output is done.
- while (!sawOutputEOS) {
- if (!sawInputEOS) {
- // Input side
- int inputBufIndex = codec.dequeueInputBuffer(TIMEOUT_MICROSECONDS);
-
- if (inputBufIndex >= 0) {
- ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
- int sampleSize = extractor.readSampleData(dstBuf, 0);
- long presentationTimeMicroSec = 0;
-
- if (sampleSize < 0) {
- sawInputEOS = true;
- sampleSize = 0;
- } else {
- presentationTimeMicroSec = extractor.getSampleTime();
- }
-
- codec.queueInputBuffer(inputBufIndex,
- 0, /* offset */
- sampleSize,
- presentationTimeMicroSec,
- sawInputEOS ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
-
- if (!sawInputEOS) {
- extractor.advance();
- }
- }
- }
-
- // Output side
- MediaCodec.BufferInfo info = new BufferInfo();
- final int outputBufIndex = codec.dequeueOutputBuffer(info, TIMEOUT_MICROSECONDS);
-
- if (outputBufIndex >= 0) {
- ByteBuffer buf = codecOutputBuffers[outputBufIndex];
-
- if (info.size > 0) {
- nativeOnChunkDecoded(nativeMediaCodecBridge, buf, info.size,
- inputChannelCount, outputChannelCount);
- }
-
- buf.clear();
- codec.releaseOutputBuffer(outputBufIndex, false /* render */);
-
- if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
- sawOutputEOS = true;
- }
- } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
- codecOutputBuffers = codec.getOutputBuffers();
- } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
- MediaFormat newFormat = codec.getOutputFormat();
- outputChannelCount = newFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
- Log.d(LOG_TAG, "output format changed to " + newFormat);
- }
- }
-
- encodedFD.detachFd();
-
- codec.stop();
- codec.release();
- codec = null;
-
- return true;
- }
-
- private static native void nativeOnChunkDecoded(
- int nativeWebAudioMediaCodecBridge, ByteBuffer buf, int size,
- int inputChannelCount, int outputChannelCount);
-
- private static native void nativeInitializeDestination(
- int nativeWebAudioMediaCodecBridge,
- int inputChannelCount,
- int sampleRate,
- long durationMicroseconds);
-}
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
index 47ee5997160..a029e209805 100644
--- a/chromium/media/base/android/media_codec_bridge.cc
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -5,6 +5,7 @@
#include "media/base/android/media_codec_bridge.h"
#include <jni.h>
+#include <string>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
@@ -20,6 +21,7 @@
#include "media/base/decrypt_config.h"
using base::android::AttachCurrentThread;
+using base::android::ConvertJavaStringToUTF8;
using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
@@ -27,7 +29,7 @@ namespace media {
enum { kBufferFlagEndOfStream = 4 };
-static const char* AudioCodecToMimeType(const AudioCodec codec) {
+static const std::string AudioCodecToAndroidMimeType(const AudioCodec codec) {
switch (codec) {
case kCodecMP3:
return "audio/mpeg";
@@ -36,21 +38,53 @@ static const char* AudioCodecToMimeType(const AudioCodec codec) {
case kCodecAAC:
return "audio/mp4a-latm";
default:
- return NULL;
+ return std::string();
}
}
-static const char* VideoCodecToMimeType(const VideoCodec codec) {
+static const std::string VideoCodecToAndroidMimeType(const VideoCodec codec) {
switch (codec) {
case kCodecH264:
return "video/avc";
case kCodecVP8:
return "video/x-vnd.on2.vp8";
default:
- return NULL;
+ return std::string();
}
}
+static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
+ // TODO(xhwang): Shall we handle more detailed strings like "mp4a.40.2"?
+ if (codec == "avc1")
+ return "video/avc";
+ if (codec == "mp4a")
+ return "audio/mp4a-latm";
+ if (codec == "vp8" || codec == "vp8.0")
+ return "video/x-vnd.on2.vp8";
+ if (codec == "vorbis")
+ return "audio/vorbis";
+ return std::string();
+}
+
+// TODO(qinmin): using a map to help all the conversions in this class.
+static const std::string AndroidMimeTypeToCodecType(const std::string& mime) {
+ if (mime == "video/mp4v-es")
+ return "mp4v";
+ if (mime == "video/avc")
+ return "avc1";
+ if (mime == "video/x-vnd.on2.vp8")
+ return "vp8";
+ if (mime == "video/x-vnd.on2.vp9")
+ return "vp9";
+ if (mime == "audio/mp4a-latm")
+ return "mp4a";
+ if (mime == "audio/mpeg")
+ return "mp3";
+ if (mime == "audio/vorbis")
+ return "vorbis";
+ return std::string();
+}
+
static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
JNIEnv* env, scoped_ptr<jint[]> native_array, int size) {
ScopedJavaLocalRef<jintArray> j_array(env, env->NewIntArray(size));
@@ -59,44 +93,78 @@ static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
}
// static
-const base::TimeDelta MediaCodecBridge::kTimeOutInfinity =
- base::TimeDelta::FromMicroseconds(-1);
+bool MediaCodecBridge::IsAvailable() {
+ // MediaCodec is only available on JB and greater.
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 16;
+}
// static
-const base::TimeDelta MediaCodecBridge::kTimeOutNoWait =
- base::TimeDelta::FromMicroseconds(0);
+void MediaCodecBridge::GetCodecsInfo(
+ std::vector<CodecsInfo>* codecs_info) {
+ JNIEnv* env = AttachCurrentThread();
+ if (!IsAvailable())
+ return;
+
+ std::string mime_type;
+ ScopedJavaLocalRef<jobjectArray> j_codec_info_array =
+ Java_MediaCodecBridge_getCodecsInfo(env);
+ jsize len = env->GetArrayLength(j_codec_info_array.obj());
+ for (jsize i = 0; i < len; ++i) {
+ ScopedJavaLocalRef<jobject> j_info(
+ env, env->GetObjectArrayElement(j_codec_info_array.obj(), i));
+ ScopedJavaLocalRef<jstring> j_codec_type =
+ Java_CodecInfo_codecType(env, j_info.obj());
+ ConvertJavaStringToUTF8(env, j_codec_type.obj(), &mime_type);
+ CodecsInfo info;
+ info.codecs = AndroidMimeTypeToCodecType(mime_type);
+ info.secure_decoder_supported =
+ Java_CodecInfo_isSecureDecoderSupported(env, j_info.obj());
+ codecs_info->push_back(info);
+ }
+}
// static
-bool MediaCodecBridge::IsAvailable() {
- // MediaCodec is only available on JB and greater.
- return base::android::BuildInfo::GetInstance()->sdk_int() >= 16;
+bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
+ JNIEnv* env = AttachCurrentThread();
+ std::string mime = CodecTypeToAndroidMimeType(codec);
+ if (mime.empty())
+ return false;
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
+ ScopedJavaLocalRef<jobject> j_media_codec_bridge =
+ Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure);
+ if (!j_media_codec_bridge.is_null()) {
+ Java_MediaCodecBridge_release(env, j_media_codec_bridge.obj());
+ return true;
+ }
+ return false;
}
-MediaCodecBridge::MediaCodecBridge(const char* mime) {
+MediaCodecBridge::MediaCodecBridge(const std::string& mime, bool is_secure) {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
- DCHECK(mime);
-
- ScopedJavaLocalRef<jstring> j_type = ConvertUTF8ToJavaString(env, mime);
- j_media_codec_.Reset(Java_MediaCodecBridge_create(
- env, j_type.obj()));
+ DCHECK(!mime.empty());
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
+ j_media_codec_.Reset(
+ Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure));
}
MediaCodecBridge::~MediaCodecBridge() {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
- Java_MediaCodecBridge_release(env, j_media_codec_.obj());
+ if (j_media_codec_.obj())
+ Java_MediaCodecBridge_release(env, j_media_codec_.obj());
}
-void MediaCodecBridge::StartInternal() {
+bool MediaCodecBridge::StartInternal() {
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_start(env, j_media_codec_.obj());
- GetOutputBuffers();
+ return Java_MediaCodecBridge_start(env, j_media_codec_.obj()) &&
+ GetOutputBuffers();
}
-int MediaCodecBridge::Reset() {
+MediaCodecStatus MediaCodecBridge::Reset() {
JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_flush(env, j_media_codec_.obj());
+ return static_cast<MediaCodecStatus>(
+ Java_MediaCodecBridge_flush(env, j_media_codec_.obj()));
}
void MediaCodecBridge::Stop() {
@@ -111,46 +179,69 @@ void MediaCodecBridge::GetOutputFormat(int* width, int* height) {
*height = Java_MediaCodecBridge_getOutputHeight(env, j_media_codec_.obj());
}
-size_t MediaCodecBridge::QueueInputBuffer(
- int index, const uint8* data, int size,
+MediaCodecStatus MediaCodecBridge::QueueInputBuffer(
+ int index, const uint8* data, int data_size,
const base::TimeDelta& presentation_time) {
- size_t size_to_copy = FillInputBuffer(index, data, size);
+ int size_to_copy = FillInputBuffer(index, data, data_size);
+ DCHECK_EQ(size_to_copy, data_size);
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_queueInputBuffer(
+ return static_cast<MediaCodecStatus>(Java_MediaCodecBridge_queueInputBuffer(
env, j_media_codec_.obj(),
- index, 0, size_to_copy, presentation_time.InMicroseconds(), 0);
- return size_to_copy;
+ index, 0, size_to_copy, presentation_time.InMicroseconds(), 0));
}
-size_t MediaCodecBridge::QueueSecureInputBuffer(
+MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
int index, const uint8* data, int data_size, const uint8* key_id,
int key_id_size, const uint8* iv, int iv_size,
const SubsampleEntry* subsamples, int subsamples_size,
const base::TimeDelta& presentation_time) {
- size_t size_to_copy = FillInputBuffer(index, data, data_size);
+ int size_to_copy = FillInputBuffer(index, data, data_size);
+ DCHECK_EQ(size_to_copy, data_size);
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_key_id =
base::android::ToJavaByteArray(env, key_id, key_id_size);
ScopedJavaLocalRef<jbyteArray> j_iv =
base::android::ToJavaByteArray(env, iv, iv_size);
- scoped_ptr<jint[]> native_clear_array(new jint[subsamples_size]);
- scoped_ptr<jint[]> native_cypher_array(new jint[subsamples_size]);
- for (int i = 0; i < subsamples_size; ++i) {
- native_clear_array[i] = subsamples[i].clear_bytes;
- native_cypher_array[i] = subsamples[i].cypher_bytes;
+
+ // MediaCodec.CryptoInfo documentation says passing NULL for |clear_array|
+ // to indicate that all data is encrypted. But it doesn't specify what
+ // |cypher_array| and |subsamples_size| should be in that case. Passing
+ // one subsample here just to be on the safe side.
+ int new_subsamples_size = subsamples_size == 0 ? 1 : subsamples_size;
+
+ scoped_ptr<jint[]> native_clear_array(new jint[new_subsamples_size]);
+ scoped_ptr<jint[]> native_cypher_array(new jint[new_subsamples_size]);
+
+ if (subsamples_size == 0) {
+ DCHECK(!subsamples);
+ native_clear_array[0] = 0;
+ native_cypher_array[0] = data_size;
+ } else {
+ DCHECK_GT(subsamples_size, 0);
+ DCHECK(subsamples);
+ for (int i = 0; i < subsamples_size; ++i) {
+ DCHECK(subsamples[i].clear_bytes <= std::numeric_limits<uint16>::max());
+ if (subsamples[i].cypher_bytes >
+ static_cast<uint32>(std::numeric_limits<jint>::max())) {
+ return MEDIA_CODEC_ERROR;
+ }
+
+ native_clear_array[i] = subsamples[i].clear_bytes;
+ native_cypher_array[i] = subsamples[i].cypher_bytes;
+ }
}
- ScopedJavaLocalRef<jintArray> clear_array = ToJavaIntArray(
- env, native_clear_array.Pass(), subsamples_size);
- ScopedJavaLocalRef<jintArray> cypher_array = ToJavaIntArray(
- env, native_cypher_array.Pass(), subsamples_size);
- Java_MediaCodecBridge_queueSecureInputBuffer(
- env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
- clear_array.obj(), cypher_array.obj(), subsamples_size,
- presentation_time.InMicroseconds());
+ ScopedJavaLocalRef<jintArray> clear_array =
+ ToJavaIntArray(env, native_clear_array.Pass(), new_subsamples_size);
+ ScopedJavaLocalRef<jintArray> cypher_array =
+ ToJavaIntArray(env, native_cypher_array.Pass(), new_subsamples_size);
- return size_to_copy;
+ return static_cast<MediaCodecStatus>(
+ Java_MediaCodecBridge_queueSecureInputBuffer(
+ env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
+ clear_array.obj(), cypher_array.obj(), new_subsamples_size,
+ presentation_time.InMicroseconds()));
}
void MediaCodecBridge::QueueEOS(int input_buffer_index) {
@@ -160,36 +251,34 @@ void MediaCodecBridge::QueueEOS(int input_buffer_index) {
input_buffer_index, 0, 0, 0, kBufferFlagEndOfStream);
}
-int MediaCodecBridge::DequeueInputBuffer(base::TimeDelta timeout) {
+MediaCodecStatus MediaCodecBridge::DequeueInputBuffer(
+ const base::TimeDelta& timeout, int* index) {
JNIEnv* env = AttachCurrentThread();
- return Java_MediaCodecBridge_dequeueInputBuffer(
+ ScopedJavaLocalRef<jobject> result = Java_MediaCodecBridge_dequeueInputBuffer(
env, j_media_codec_.obj(), timeout.InMicroseconds());
+ *index = Java_DequeueInputResult_index(env, result.obj());
+ return static_cast<MediaCodecStatus>(
+ Java_DequeueInputResult_status(env, result.obj()));
}
-int MediaCodecBridge::DequeueOutputBuffer(
- base::TimeDelta timeout, size_t* offset, size_t* size,
+MediaCodecStatus MediaCodecBridge::DequeueOutputBuffer(
+ const base::TimeDelta& timeout, int* index, size_t* offset, size_t* size,
base::TimeDelta* presentation_time, bool* end_of_stream) {
JNIEnv* env = AttachCurrentThread();
-
ScopedJavaLocalRef<jobject> result =
Java_MediaCodecBridge_dequeueOutputBuffer(env, j_media_codec_.obj(),
timeout.InMicroseconds());
-
- int j_buffer = Java_DequeueOutputResult_index(env, result.obj());
- if (j_buffer >= 0) {
- int64 presentation_time_us =
- Java_DequeueOutputResult_presentationTimeMicroseconds(
- env, result.obj());
- int flags = Java_DequeueOutputResult_flags(env, result.obj());
- *offset = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_offset(env, result.obj()));
- *size = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_numBytes(env, result.obj()));
- *presentation_time =
- base::TimeDelta::FromMicroseconds(presentation_time_us);
- *end_of_stream = flags & kBufferFlagEndOfStream;
- }
- return j_buffer;
+ *index = Java_DequeueOutputResult_index(env, result.obj());
+ *offset = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_offset(env, result.obj()));
+ *size = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_numBytes(env, result.obj()));
+ *presentation_time = base::TimeDelta::FromMicroseconds(
+ Java_DequeueOutputResult_presentationTimeMicroseconds(env, result.obj()));
+ int flags = Java_DequeueOutputResult_flags(env, result.obj());
+ *end_of_stream = flags & kBufferFlagEndOfStream;
+ return static_cast<MediaCodecStatus>(
+ Java_DequeueOutputResult_status(env, result.obj()));
}
void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
@@ -200,9 +289,9 @@ void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
env, j_media_codec_.obj(), index, render);
}
-void MediaCodecBridge::GetOutputBuffers() {
+bool MediaCodecBridge::GetOutputBuffers() {
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_getOutputBuffers(env, j_media_codec_.obj());
+ return Java_MediaCodecBridge_getOutputBuffers(env, j_media_codec_.obj());
}
size_t MediaCodecBridge::FillInputBuffer(
@@ -226,8 +315,9 @@ size_t MediaCodecBridge::FillInputBuffer(
return size_to_copy;
}
-AudioCodecBridge::AudioCodecBridge(const char* mime)
- : MediaCodecBridge(mime) {
+AudioCodecBridge::AudioCodecBridge(const std::string& mime)
+ // Audio codec doesn't care about security level.
+ : MediaCodecBridge(mime, false) {
}
bool AudioCodecBridge::Start(
@@ -235,10 +325,16 @@ bool AudioCodecBridge::Start(
const uint8* extra_data, size_t extra_data_size, bool play_audio,
jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
- DCHECK(AudioCodecToMimeType(codec));
+
+ if (!media_codec())
+ return false;
+
+ std::string codec_string = AudioCodecToAndroidMimeType(codec);
+ if (codec_string.empty())
+ return false;
ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, AudioCodecToMimeType(codec));
+ ConvertUTF8ToJavaString(env, codec_string);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createAudioFormat(
env, j_mime.obj(), sample_rate, channel_count));
@@ -251,8 +347,8 @@ bool AudioCodecBridge::Start(
env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
return false;
}
- StartInternal();
- return true;
+
+ return StartInternal();
}
bool AudioCodecBridge::ConfigureMediaFormat(
@@ -262,7 +358,7 @@ bool AudioCodecBridge::ConfigureMediaFormat(
return true;
JNIEnv* env = AttachCurrentThread();
- switch(codec) {
+ switch (codec) {
case kCodecVorbis:
{
if (extra_data[0] != 2) {
@@ -353,7 +449,7 @@ bool AudioCodecBridge::ConfigureMediaFormat(
}
default:
LOG(ERROR) << "Invalid header encountered for codec: "
- << AudioCodecToMimeType(codec);
+ << AudioCodecToAndroidMimeType(codec);
return false;
}
return true;
@@ -378,18 +474,24 @@ void AudioCodecBridge::SetVolume(double volume) {
Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
}
-VideoCodecBridge::VideoCodecBridge(const char* mime)
- : MediaCodecBridge(mime) {
+VideoCodecBridge::VideoCodecBridge(const std::string& mime, bool is_secure)
+ : MediaCodecBridge(mime, is_secure) {
}
bool VideoCodecBridge::Start(
const VideoCodec codec, const gfx::Size& size, jobject surface,
jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
- DCHECK(VideoCodecToMimeType(codec));
+
+ if (!media_codec())
+ return false;
+
+ std::string codec_string = VideoCodecToAndroidMimeType(codec);
+ if (codec_string.empty())
+ return false;
ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, VideoCodecToMimeType(codec));
+ ConvertUTF8ToJavaString(env, codec_string);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoFormat(
env, j_mime.obj(), size.width(), size.height()));
@@ -398,18 +500,19 @@ bool VideoCodecBridge::Start(
env, media_codec(), j_format.obj(), surface, media_crypto, 0)) {
return false;
}
- StartInternal();
- return true;
+
+ return StartInternal();
}
AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec codec) {
- const char* mime = AudioCodecToMimeType(codec);
- return mime ? new AudioCodecBridge(mime) : NULL;
+ const std::string mime = AudioCodecToAndroidMimeType(codec);
+ return mime.empty() ? NULL : new AudioCodecBridge(mime);
}
-VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec) {
- const char* mime = VideoCodecToMimeType(codec);
- return mime ? new VideoCodecBridge(mime) : NULL;
+VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec,
+ bool is_secure) {
+ const std::string mime = VideoCodecToAndroidMimeType(codec);
+ return mime.empty() ? NULL : new VideoCodecBridge(mime, is_secure);
}
bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
@@ -417,4 +520,3 @@ bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
}
} // namespace media
-
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
index a0edcb9e693..df30472e69e 100644
--- a/chromium/media/base/android/media_codec_bridge.h
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -18,6 +18,21 @@ namespace media {
struct SubsampleEntry;
+// These must be in sync with MediaCodecBridge.MEDIA_CODEC_XXX constants in
+// MediaCodecBridge.java.
+enum MediaCodecStatus {
+ MEDIA_CODEC_OK,
+ MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER,
+ MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER,
+ MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED,
+ MEDIA_CODEC_OUTPUT_FORMAT_CHANGED,
+ MEDIA_CODEC_INPUT_END_OF_STREAM,
+ MEDIA_CODEC_OUTPUT_END_OF_STREAM,
+ MEDIA_CODEC_NO_KEY,
+ MEDIA_CODEC_STOPPED,
+ MEDIA_CODEC_ERROR
+};
+
// This class serves as a bridge for native code to call java functions inside
// Android MediaCodec class. For more information on Android MediaCodec, check
// http://developer.android.com/reference/android/media/MediaCodec.html
@@ -26,19 +41,25 @@ struct SubsampleEntry;
// object.
class MEDIA_EXPORT MediaCodecBridge {
public:
- enum DequeueBufferInfo {
- INFO_OUTPUT_BUFFERS_CHANGED = -3,
- INFO_OUTPUT_FORMAT_CHANGED = -2,
- INFO_TRY_AGAIN_LATER = -1,
- INFO_MEDIA_CODEC_ERROR = -1000,
- };
-
- static const base::TimeDelta kTimeOutInfinity;
- static const base::TimeDelta kTimeOutNoWait;
-
// Returns true if MediaCodec is available on the device.
static bool IsAvailable();
+ // Returns whether MediaCodecBridge has a decoder that |is_secure| and can
+ // decode |codec| type.
+ static bool CanDecode(const std::string& codec, bool is_secure);
+
+ // Represents supported codecs on android. |secure_decoder_supported| is true
+ // if secure decoder is available for the codec type.
+ // TODO(qinmin): Currently the codecs string only contains one codec; do we
+ // need more specific codecs separated by commas? (e.g. "vp8" -> "vp8, vp8.0")
+ struct CodecsInfo {
+ std::string codecs;
+ bool secure_decoder_supported;
+ };
+
+ // Get a list of supported codecs.
+ static void GetCodecsInfo(std::vector<CodecsInfo>* codecs_info);
+
virtual ~MediaCodecBridge();
// Resets both input and output, all indices previously returned in calls to
@@ -47,7 +68,7 @@ class MEDIA_EXPORT MediaCodecBridge {
// words, there will be no outputs until new input is provided.
// Returns MEDIA_CODEC_ERROR if an unexpected error happens, or MEDIA_CODEC_OK
// otherwise.
- int Reset();
+ MediaCodecStatus Reset();
// Finishes the decode/encode session. The instance remains active
// and ready to be StartAudio/Video()ed again. HOWEVER, due to the buggy
@@ -61,14 +82,17 @@ class MEDIA_EXPORT MediaCodecBridge {
void GetOutputFormat(int* width, int* height);
// Submits a byte array to the given input buffer. Call this after getting an
- // available buffer from DequeueInputBuffer(). Returns the number of bytes
- // put to the input buffer.
- size_t QueueInputBuffer(int index, const uint8* data, int size,
- const base::TimeDelta& presentation_time);
+ // available buffer from DequeueInputBuffer().
+ MediaCodecStatus QueueInputBuffer(int index,
+ const uint8* data,
+ int size,
+ const base::TimeDelta& presentation_time);
// Similar to the above call, but submits a buffer that is encrypted.
- size_t QueueSecureInputBuffer(
- int index, const uint8* data, int data_size,
+ // Note: NULL |subsamples| indicates the whole buffer is encrypted.
+ MediaCodecStatus QueueSecureInputBuffer(
+ int index,
+ const uint8* data, int data_size,
const uint8* key_id, int key_id_size,
const uint8* iv, int iv_size,
const SubsampleEntry* subsamples, int subsamples_size,
@@ -77,36 +101,48 @@ class MEDIA_EXPORT MediaCodecBridge {
// Submits an empty buffer with a EOS (END OF STREAM) flag.
void QueueEOS(int input_buffer_index);
- // Returns an index (>=0) of an input buffer to be filled with valid data,
- // INFO_TRY_AGAIN_LATER if no such buffer is currently available, or
- // INFO_MEDIA_CODEC_ERROR if unexpected error happens.
- // Use kTimeOutInfinity for infinite timeout.
- int DequeueInputBuffer(base::TimeDelta timeout);
+ // Returns:
+ // MEDIA_CODEC_OK if an input buffer is ready to be filled with valid data,
+ //   MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER if no such buffer is available, or
+ // MEDIA_CODEC_ERROR if unexpected error happens.
+ // Note: Never use infinite timeout as this would block the decoder thread and
+ // prevent the decoder job from being released.
+ MediaCodecStatus DequeueInputBuffer(const base::TimeDelta& timeout,
+ int* index);
// Dequeues an output buffer, block at most timeout_us microseconds.
- // Returns the index of an output buffer that has been successfully decoded
- // or one of DequeueBufferInfo above.
- // Use kTimeOutInfinity for infinite timeout.
- int DequeueOutputBuffer(
- base::TimeDelta timeout, size_t* offset, size_t* size,
- base::TimeDelta* presentation_time, bool* end_of_stream);
+ // Returns the status of this operation. If OK is returned, the output
+ // parameters should be populated. Otherwise, the values of output parameters
+ // should not be used.
+ // Note: Never use infinite timeout as this would block the decoder thread and
+ // prevent the decoder job from being released.
+ // TODO(xhwang): Can we drop |end_of_stream| and return
+ // MEDIA_CODEC_OUTPUT_END_OF_STREAM?
+ MediaCodecStatus DequeueOutputBuffer(const base::TimeDelta& timeout,
+ int* index,
+ size_t* offset,
+ size_t* size,
+ base::TimeDelta* presentation_time,
+ bool* end_of_stream);
// Returns the buffer to the codec. If you previously specified a surface
// when configuring this video decoder you can optionally render the buffer.
void ReleaseOutputBuffer(int index, bool render);
// Gets output buffers from media codec and keeps them inside the java class.
- // To access them, use DequeueOutputBuffer().
- void GetOutputBuffers();
+ // To access them, use DequeueOutputBuffer(). Returns whether output buffers
+ // were successfully obtained.
+ bool GetOutputBuffers() WARN_UNUSED_RESULT;
static bool RegisterMediaCodecBridge(JNIEnv* env);
protected:
- explicit MediaCodecBridge(const char* mime);
+ MediaCodecBridge(const std::string& mime, bool is_secure);
// Calls start() against the media codec instance. Used in StartXXX() after
- // configuring media codec.
- void StartInternal();
+ // configuring media codec. Returns whether media codec was successfully
+ // started.
+ bool StartInternal() WARN_UNUSED_RESULT;
jobject media_codec() { return j_media_codec_.obj(); }
@@ -129,7 +165,7 @@ class AudioCodecBridge : public MediaCodecBridge {
// Start the audio codec bridge.
bool Start(const AudioCodec codec, int sample_rate, int channel_count,
const uint8* extra_data, size_t extra_data_size,
- bool play_audio, jobject media_crypto);
+ bool play_audio, jobject media_crypto) WARN_UNUSED_RESULT;
// Play the output buffer. This call must be called after
// DequeueOutputBuffer() and before ReleaseOutputBuffer.
@@ -139,7 +175,7 @@ class AudioCodecBridge : public MediaCodecBridge {
void SetVolume(double volume);
private:
- explicit AudioCodecBridge(const char* mime);
+ explicit AudioCodecBridge(const std::string& mime);
// Configure the java MediaFormat object with the extra codec data passed in.
bool ConfigureMediaFormat(jobject j_format, const AudioCodec codec,
@@ -150,7 +186,7 @@ class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
public:
// Returns an VideoCodecBridge instance if |codec| is supported, or a NULL
// pointer otherwise.
- static VideoCodecBridge* Create(const VideoCodec codec);
+ static VideoCodecBridge* Create(const VideoCodec codec, bool is_secure);
// Start the video codec bridge.
// TODO(qinmin): Pass codec specific data if available.
@@ -158,7 +194,7 @@ class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
jobject media_crypto);
private:
- explicit VideoCodecBridge(const char* mime);
+ VideoCodecBridge(const std::string& mime, bool is_secure);
};
} // namespace media
diff --git a/chromium/media/base/android/media_codec_bridge_unittest.cc b/chromium/media/base/android/media_codec_bridge_unittest.cc
index ee38e6d1a99..1e24b5f28b5 100644
--- a/chromium/media/base/android/media_codec_bridge_unittest.cc
+++ b/chromium/media/base/android/media_codec_bridge_unittest.cc
@@ -95,6 +95,10 @@ namespace media {
static const int kPresentationTimeBase = 100;
+static inline const base::TimeDelta InfiniteTimeOut() {
+ return base::TimeDelta::FromMicroseconds(-1);
+}
+
void DecodeMediaFrame(
VideoCodecBridge* media_codec, const uint8* data, size_t data_size,
const base::TimeDelta input_presentation_timestamp,
@@ -103,17 +107,22 @@ void DecodeMediaFrame(
base::TimeDelta timestamp = initial_timestamp_lower_bound;
base::TimeDelta new_timestamp;
for (int i = 0; i < 10; ++i) {
- int input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ int input_buf_index = -1;
+ MediaCodecStatus status =
+ media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
+ ASSERT_EQ(MEDIA_CODEC_OK, status);
+
media_codec->QueueInputBuffer(
input_buf_index, data, data_size, input_presentation_timestamp);
+
size_t unused_offset = 0;
size_t size = 0;
bool eos = false;
- int output_buf_index = media_codec->DequeueOutputBuffer(
- MediaCodecBridge::kTimeOutInfinity,
- &unused_offset, &size, &new_timestamp, &eos);
- if (output_buf_index > 0)
+ int output_buf_index = -1;
+ status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
+ &output_buf_index, &unused_offset, &size, &new_timestamp, &eos);
+
+ if (status == MEDIA_CODEC_OK && output_buf_index > 0)
media_codec->ReleaseOutputBuffer(output_buf_index, false);
// Output time stamp should not be smaller than old timestamp.
ASSERT_TRUE(new_timestamp >= timestamp);
@@ -127,7 +136,7 @@ TEST(MediaCodecBridgeTest, Initialize) {
return;
scoped_ptr<media::MediaCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecH264));
+ media_codec.reset(VideoCodecBridge::Create(kCodecH264, false));
}
TEST(MediaCodecBridgeTest, DoNormal) {
@@ -137,10 +146,12 @@ TEST(MediaCodecBridgeTest, DoNormal) {
scoped_ptr<media::AudioCodecBridge> media_codec;
media_codec.reset(AudioCodecBridge::Create(kCodecMP3));
- media_codec->Start(kCodecMP3, 44100, 2, NULL, 0, false, NULL);
+ ASSERT_TRUE(media_codec->Start(kCodecMP3, 44100, 2, NULL, 0, false, NULL));
- int input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ int input_buf_index = -1;
+ MediaCodecStatus status =
+ media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
+ ASSERT_EQ(MEDIA_CODEC_OK, status);
ASSERT_GE(input_buf_index, 0);
int64 input_pts = kPresentationTimeBase;
@@ -148,14 +159,12 @@ TEST(MediaCodecBridgeTest, DoNormal) {
input_buf_index, test_mp3, sizeof(test_mp3),
base::TimeDelta::FromMicroseconds(++input_pts));
- input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ status = media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
media_codec->QueueInputBuffer(
input_buf_index, test_mp3, sizeof(test_mp3),
base::TimeDelta::FromMicroseconds(++input_pts));
- input_buf_index = media_codec->DequeueInputBuffer(
- MediaCodecBridge::kTimeOutInfinity);
+ status = media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
media_codec->QueueEOS(input_buf_index);
input_pts = kPresentationTimeBase;
@@ -164,21 +173,25 @@ TEST(MediaCodecBridgeTest, DoNormal) {
size_t unused_offset = 0;
size_t size = 0;
base::TimeDelta timestamp;
- int output_buf_index = media_codec->DequeueOutputBuffer(
- MediaCodecBridge::kTimeOutInfinity,
- &unused_offset, &size, &timestamp, &eos);
- switch (output_buf_index) {
- case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
+ int output_buf_index = -1;
+ status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
+ &output_buf_index, &unused_offset, &size, &timestamp, &eos);
+ switch (status) {
+ case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
FAIL();
return;
- case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
continue;
- case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
- media_codec->GetOutputBuffers();
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ ASSERT_TRUE(media_codec->GetOutputBuffers());
continue;
+
+ default:
+ break;
}
+ ASSERT_GE(output_buf_index, 0);
EXPECT_LE(1u, size);
if (!eos)
EXPECT_EQ(++input_pts, timestamp.InMicroseconds());
@@ -222,7 +235,7 @@ TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
return;
scoped_ptr<VideoCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecVP8));
+ media_codec.reset(VideoCodecBridge::Create(kCodecVP8, false));
EXPECT_TRUE(media_codec->Start(
kCodecVP8, gfx::Size(320, 240), NULL, NULL));
scoped_refptr<DecoderBuffer> buffer =
@@ -250,7 +263,7 @@ TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
TEST(MediaCodecBridgeTest, CreateUnsupportedCodec) {
EXPECT_EQ(NULL, AudioCodecBridge::Create(kUnknownAudioCodec));
- EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec));
+ EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec, false));
}
} // namespace media
diff --git a/chromium/media/base/android/media_decoder_job.cc b/chromium/media/base/android/media_decoder_job.cc
new file mode 100644
index 00000000000..65e9a10b432
--- /dev/null
+++ b/chromium/media/base/android/media_decoder_job.cc
@@ -0,0 +1,350 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_decoder_job.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/bind_to_loop.h"
+
+namespace media {
+
+// Timeout value for media codec operations. Because the first
+// DequeInputBuffer() can take about 150 milliseconds, use 250 milliseconds
+// here. See http://b/9357571.
+static const int kMediaCodecTimeoutInMilliseconds = 250;
+
+MediaDecoderJob::MediaDecoderJob(
+ const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ const base::Closure& request_data_cb)
+ : ui_loop_(base::MessageLoopProxy::current()),
+ decoder_loop_(decoder_loop),
+ media_codec_bridge_(media_codec_bridge),
+ needs_flush_(false),
+ input_eos_encountered_(false),
+ weak_this_(this),
+ request_data_cb_(request_data_cb),
+ access_unit_index_(0),
+ input_buf_index_(-1),
+ stop_decode_pending_(false),
+ destroy_pending_(false) {
+}
+
+MediaDecoderJob::~MediaDecoderJob() {}
+
+void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
+ DVLOG(1) << __FUNCTION__ << ": " << data.access_units.size() << " units";
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(!on_data_received_cb_.is_null());
+
+ base::Closure done_cb = base::ResetAndReturn(&on_data_received_cb_);
+
+ if (stop_decode_pending_) {
+ OnDecodeCompleted(MEDIA_CODEC_STOPPED, kNoTimestamp(), 0);
+ return;
+ }
+
+ access_unit_index_ = 0;
+ received_data_ = data;
+ done_cb.Run();
+}
+
+void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(on_data_received_cb_.is_null());
+ DCHECK(decode_cb_.is_null());
+
+ if (HasData()) {
+ ui_loop_->PostTask(FROM_HERE, prefetch_cb);
+ return;
+ }
+
+ RequestData(prefetch_cb);
+}
+
+bool MediaDecoderJob::Decode(
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback) {
+ DCHECK(decode_cb_.is_null());
+ DCHECK(on_data_received_cb_.is_null());
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+
+ decode_cb_ = callback;
+
+ if (!HasData()) {
+ RequestData(base::Bind(&MediaDecoderJob::DecodeNextAccessUnit,
+ base::Unretained(this),
+ start_time_ticks,
+ start_presentation_timestamp));
+ return true;
+ }
+
+ if (DemuxerStream::kConfigChanged ==
+ received_data_.access_units[access_unit_index_].status) {
+ // Clear received data because we need to handle a config change.
+ decode_cb_.Reset();
+ received_data_ = DemuxerData();
+ access_unit_index_ = 0;
+ return false;
+ }
+
+ DecodeNextAccessUnit(start_time_ticks, start_presentation_timestamp);
+ return true;
+}
+
+void MediaDecoderJob::StopDecode() {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(is_decoding());
+ stop_decode_pending_ = true;
+}
+
+void MediaDecoderJob::Flush() {
+ DCHECK(decode_cb_.is_null());
+
+ // Do nothing, flush when the next Decode() happens.
+ needs_flush_ = true;
+ received_data_ = DemuxerData();
+ input_eos_encountered_ = false;
+ access_unit_index_ = 0;
+ on_data_received_cb_.Reset();
+}
+
+void MediaDecoderJob::Release() {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+
+ destroy_pending_ = is_decoding();
+
+ request_data_cb_.Reset();
+ on_data_received_cb_.Reset();
+ decode_cb_.Reset();
+
+ if (destroy_pending_)
+ return;
+
+ delete this;
+}
+
+MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(decoder_loop_->BelongsToCurrentThread());
+
+ int input_buf_index = input_buf_index_;
+ input_buf_index_ = -1;
+
+ // TODO(xhwang): Hide DequeueInputBuffer() and the index in MediaCodecBridge.
+ if (input_buf_index == -1) {
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+ MediaCodecStatus status =
+ media_codec_bridge_->DequeueInputBuffer(timeout, &input_buf_index);
+ if (status != MEDIA_CODEC_OK) {
+ DVLOG(1) << "DequeueInputBuffer fails: " << status;
+ return status;
+ }
+ }
+
+ // TODO(qinmin): skip frames if video is falling far behind.
+ DCHECK_GE(input_buf_index, 0);
+ if (unit.end_of_stream || unit.data.empty()) {
+ media_codec_bridge_->QueueEOS(input_buf_index);
+ return MEDIA_CODEC_INPUT_END_OF_STREAM;
+ }
+
+ if (unit.key_id.empty() || unit.iv.empty()) {
+ DCHECK(unit.iv.empty() || !unit.key_id.empty());
+ return media_codec_bridge_->QueueInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
+ }
+
+ MediaCodecStatus status = media_codec_bridge_->QueueSecureInputBuffer(
+ input_buf_index,
+ &unit.data[0], unit.data.size(),
+ reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
+ reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
+ unit.subsamples.empty() ? NULL : &unit.subsamples[0],
+ unit.subsamples.size(),
+ unit.timestamp);
+
+ // In case of MEDIA_CODEC_NO_KEY, we must reuse the |input_buf_index_|.
+ // Otherwise MediaDrm will report errors.
+ if (status == MEDIA_CODEC_NO_KEY)
+ input_buf_index_ = input_buf_index;
+
+ return status;
+}
+
+bool MediaDecoderJob::HasData() const {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ // When |input_eos_encountered_| is set, |access_units| must not be empty and
+ // |access_unit_index_| must be pointing to an EOS unit. We'll reuse this
+ // unit to flush the decoder until we hit output EOS.
+ DCHECK(!input_eos_encountered_ ||
+ (received_data_.access_units.size() > 0 &&
+ access_unit_index_ < received_data_.access_units.size()))
+ << " (access_units.size(): " << received_data_.access_units.size()
+ << ", access_unit_index_: " << access_unit_index_ << ")";
+ return access_unit_index_ < received_data_.access_units.size() ||
+ input_eos_encountered_;
+}
+
+void MediaDecoderJob::RequestData(const base::Closure& done_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(on_data_received_cb_.is_null());
+ DCHECK(!input_eos_encountered_);
+
+ received_data_ = DemuxerData();
+ access_unit_index_ = 0;
+ on_data_received_cb_ = done_cb;
+
+ request_data_cb_.Run();
+}
+
+void MediaDecoderJob::DecodeNextAccessUnit(
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp) {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(!decode_cb_.is_null());
+
+ decoder_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaDecoderJob::DecodeInternal, base::Unretained(this),
+ received_data_.access_units[access_unit_index_],
+ start_time_ticks, start_presentation_timestamp, needs_flush_,
+ media::BindToLoop(ui_loop_, base::Bind(
+ &MediaDecoderJob::OnDecodeCompleted, base::Unretained(this)))));
+ needs_flush_ = false;
+}
+
+void MediaDecoderJob::DecodeInternal(
+ const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ bool needs_flush,
+ const MediaDecoderJob::DecoderCallback& callback) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(decoder_loop_->BelongsToCurrentThread());
+
+ if (needs_flush) {
+ DVLOG(1) << "DecodeInternal needs flush.";
+ input_eos_encountered_ = false;
+ MediaCodecStatus reset_status = media_codec_bridge_->Reset();
+ if (MEDIA_CODEC_OK != reset_status) {
+ callback.Run(reset_status, start_presentation_timestamp, 0);
+ return;
+ }
+ }
+
+ // For aborted access unit, just skip it and inform the player.
+ if (unit.status == DemuxerStream::kAborted) {
+ // TODO(qinmin): use a new enum instead of MEDIA_CODEC_STOPPED.
+ callback.Run(MEDIA_CODEC_STOPPED, start_presentation_timestamp, 0);
+ return;
+ }
+
+ MediaCodecStatus input_status = MEDIA_CODEC_INPUT_END_OF_STREAM;
+ if (!input_eos_encountered_) {
+ input_status = QueueInputBuffer(unit);
+ if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM) {
+ input_eos_encountered_ = true;
+ } else if (input_status != MEDIA_CODEC_OK) {
+ callback.Run(input_status, start_presentation_timestamp, 0);
+ return;
+ }
+ }
+
+ int buffer_index = 0;
+ size_t offset = 0;
+ size_t size = 0;
+ base::TimeDelta presentation_timestamp;
+ bool output_eos_encountered = false;
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+
+ MediaCodecStatus status = media_codec_bridge_->DequeueOutputBuffer(
+ timeout, &buffer_index, &offset, &size, &presentation_timestamp,
+ &output_eos_encountered);
+
+ if (status != MEDIA_CODEC_OK) {
+ if (status == MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED) {
+ if (media_codec_bridge_->GetOutputBuffers())
+ status = MEDIA_CODEC_OK;
+ else
+ status = MEDIA_CODEC_ERROR;
+ }
+ callback.Run(status, start_presentation_timestamp, 0);
+ return;
+ }
+
+ // TODO(xhwang/qinmin): This logic is correct but strange. Clean it up.
+ if (output_eos_encountered)
+ status = MEDIA_CODEC_OUTPUT_END_OF_STREAM;
+ else if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM)
+ status = MEDIA_CODEC_INPUT_END_OF_STREAM;
+
+ base::TimeDelta time_to_render;
+ DCHECK(!start_time_ticks.is_null());
+ if (ComputeTimeToRender()) {
+ time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
+ start_time_ticks + start_presentation_timestamp);
+ }
+
+ // TODO(acolwell): Change to > since the else will never run for audio.
+ if (time_to_render >= base::TimeDelta()) {
+ decoder_loop_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
+ weak_this_.GetWeakPtr(), buffer_index, size,
+ presentation_timestamp, callback, status),
+ time_to_render);
+ return;
+ }
+
+ // TODO(qinmin): The codec is lagging behind, need to recalculate the
+ // |start_presentation_timestamp_| and |start_time_ticks_|.
+ DVLOG(1) << "codec is lagging behind :" << time_to_render.InMicroseconds();
+ ReleaseOutputBuffer(buffer_index, size, presentation_timestamp,
+ callback, status);
+}
+
+void MediaDecoderJob::OnDecodeCompleted(
+ MediaCodecStatus status, const base::TimeDelta& presentation_timestamp,
+ size_t audio_output_bytes) {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+
+ if (destroy_pending_) {
+ delete this;
+ return;
+ }
+
+ DCHECK(!decode_cb_.is_null());
+ switch (status) {
+ case MEDIA_CODEC_OK:
+ case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+ case MEDIA_CODEC_OUTPUT_END_OF_STREAM:
+ if (!input_eos_encountered_)
+ access_unit_index_++;
+ break;
+
+ case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+ case MEDIA_CODEC_INPUT_END_OF_STREAM:
+ case MEDIA_CODEC_NO_KEY:
+ case MEDIA_CODEC_STOPPED:
+ case MEDIA_CODEC_ERROR:
+ // Do nothing.
+ break;
+ };
+
+ stop_decode_pending_ = false;
+ base::ResetAndReturn(&decode_cb_).Run(status, presentation_timestamp,
+ audio_output_bytes);
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_decoder_job.h b/chromium/media/base/android/media_decoder_job.h
new file mode 100644
index 00000000000..d5a93b977c7
--- /dev/null
+++ b/chromium/media/base/android/media_decoder_job.h
@@ -0,0 +1,170 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_DECODER_JOB_H_
+#define MEDIA_BASE_ANDROID_MEDIA_DECODER_JOB_H_
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/android/media_codec_bridge.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+// Class for managing all the decoding tasks. Each decoding task will be posted
+// onto the same thread. The thread will be stopped once Stop() is called.
+class MediaDecoderJob {
+ public:
+ struct Deleter {
+ inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
+ };
+
+ // Callback when a decoder job finishes its work. Args: whether decode
+ // finished successfully, presentation time, audio output bytes.
+ typedef base::Callback<void(MediaCodecStatus, const base::TimeDelta&,
+ size_t)> DecoderCallback;
+
+ virtual ~MediaDecoderJob();
+
+ // Called by MediaSourcePlayer when more data for this object has arrived.
+ void OnDataReceived(const DemuxerData& data);
+
+ // Prefetch so we know the decoder job has data when we call Decode().
+ // |prefetch_cb| - Run when prefetching has completed.
+ void Prefetch(const base::Closure& prefetch_cb);
+
+ // Called by MediaSourcePlayer to decode some data.
+ // |callback| - Run when decode operation has completed.
+ //
+ // Returns true if the next decode was started and |callback| will be
+ // called when the decode operation is complete.
+ // Returns false if a config change is needed. |callback| is ignored
+ // and will not be called.
+ bool Decode(const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ const DecoderCallback& callback);
+
+ // Called to stop the last Decode() early.
+ // If the decoder is in the process of decoding the next frame, then
+ // this method will just allow the decode to complete as normal. If
+ // this object is waiting for a data request to complete, then this method
+ // will wait for the data to arrive and then call the |callback|
+ // passed to Decode() with a status of MEDIA_CODEC_STOPPED. This ensures that
+ // the |callback| passed to Decode() is always called and the status
+ // reflects whether data was actually decoded or the decode terminated early.
+ void StopDecode();
+
+ // Flush the decoder.
+ void Flush();
+
+ bool is_decoding() const { return !decode_cb_.is_null(); }
+
+ protected:
+ MediaDecoderJob(const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ const base::Closure& request_data_cb);
+
+ // Release the output buffer and render it.
+ virtual void ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const DecoderCallback& callback,
+ MediaCodecStatus status) = 0;
+
+ // Returns true if the "time to render" needs to be computed for frames in
+ // this decoder job.
+ virtual bool ComputeTimeToRender() const = 0;
+
+ private:
+ // Causes this instance to be deleted on the thread it is bound to.
+ void Release();
+
+ MediaCodecStatus QueueInputBuffer(const AccessUnit& unit);
+
+ // Returns true if this object has data to decode.
+ bool HasData() const;
+
+ // Initiates a request for more data.
+ // |done_cb| is called when more data is available in |received_data_|.
+ void RequestData(const base::Closure& done_cb);
+
+ // Posts a task to start decoding the next access unit in |received_data_|.
+ void DecodeNextAccessUnit(
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp);
+
+ // Helper function to decoder data on |thread_|. |unit| contains all the data
+ // to be decoded. |start_time_ticks| and |start_presentation_timestamp|
+ // represent the system time and the presentation timestamp when the first
+ // frame is rendered. We use these information to estimate when the current
+ // frame should be rendered. If |needs_flush| is true, codec needs to be
+ // flushed at the beginning of this call.
+ void DecodeInternal(const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ bool needs_flush,
+ const DecoderCallback& callback);
+
+ // Called on the UI thread to indicate that one decode cycle has completed.
+ void OnDecodeCompleted(MediaCodecStatus status,
+ const base::TimeDelta& presentation_timestamp,
+ size_t audio_output_bytes);
+
+ // The UI message loop where callbacks should be dispatched.
+ scoped_refptr<base::MessageLoopProxy> ui_loop_;
+
+ // The message loop that decoder job runs on.
+ scoped_refptr<base::MessageLoopProxy> decoder_loop_;
+
+ // The media codec bridge used for decoding. Owned by derived class.
+ // NOTE: This MUST NOT be accessed in the destructor.
+ MediaCodecBridge* media_codec_bridge_;
+
+ // Whether the decoder needs to be flushed.
+ bool needs_flush_;
+
+ // Whether input EOS is encountered.
+ bool input_eos_encountered_;
+
+ // Weak pointer passed to media decoder jobs for callbacks. It is bounded to
+ // the decoder thread.
+ base::WeakPtrFactory<MediaDecoderJob> weak_this_;
+
+ // Callback used to request more data.
+ base::Closure request_data_cb_;
+
+ // Callback to run when new data has been received.
+ base::Closure on_data_received_cb_;
+
+ // Callback to run when the current Decode() operation completes.
+ DecoderCallback decode_cb_;
+
+ // The current access unit being processed.
+ size_t access_unit_index_;
+
+ // Data received over IPC from last RequestData() operation.
+ DemuxerData received_data_;
+
+ // The index of input buffer that can be used by QueueInputBuffer().
+ // If the index is uninitialized or invalid, it must be -1.
+ int input_buf_index_;
+
+ bool stop_decode_pending_;
+
+ // Indicates that this object should be destroyed once the current
+ // Decode() has completed. This gets set when Release() gets called
+ // while there is a decode in progress.
+ bool destroy_pending_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MediaDecoderJob);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_DECODER_JOB_H_
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index ee4fbbdf115..95f38eddef9 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -4,11 +4,18 @@
#include "media/base/android/media_drm_bridge.h"
+#include "base/android/build_info.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "jni/MediaDrmBridge_jni.h"
#include "media/base/android/media_player_manager.h"
+using base::android::AttachCurrentThread;
+using base::android::ConvertUTF8ToJavaString;
using base::android::ConvertJavaStringToUTF8;
using base::android::JavaByteArrayToByteVector;
using base::android::ScopedJavaLocalRef;
@@ -134,54 +141,160 @@ static bool GetPsshData(const uint8* data, int data_size,
return false;
}
+bool MediaDrmBridge::can_use_media_drm_ = false;
+
+static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
+ const std::string& security_level_str) {
+ if (0 == security_level_str.compare("L1"))
+ return MediaDrmBridge::SECURITY_LEVEL_1;
+ if (0 == security_level_str.compare("L3"))
+ return MediaDrmBridge::SECURITY_LEVEL_3;
+ DCHECK(security_level_str.empty());
+ return MediaDrmBridge::SECURITY_LEVEL_NONE;
+}
+
+// static
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
+ int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ const GURL& frame_url,
+ const std::string& security_level,
+ MediaPlayerManager* manager) {
+ scoped_ptr<MediaDrmBridge> media_drm_bridge;
+
+ if (IsAvailable() && !scheme_uuid.empty()) {
+ // TODO(qinmin): check whether the uuid is valid.
+ media_drm_bridge.reset(new MediaDrmBridge(
+ media_keys_id, scheme_uuid, frame_url, security_level, manager));
+ if (media_drm_bridge->j_media_drm_.is_null())
+ media_drm_bridge.reset();
+ }
+
+ return media_drm_bridge.Pass();
+}
+
// static
bool MediaDrmBridge::IsAvailable() {
- return false;
+ return can_use_media_drm_ &&
+ base::android::BuildInfo::GetInstance()->sdk_int() >= 18;
+}
+
+// static
+bool MediaDrmBridge::IsSecureDecoderRequired(
+ const std::string& security_level_str) {
+ return IsSecureDecoderRequired(
+ GetSecurityLevelFromString(security_level_str));
}
-MediaDrmBridge* MediaDrmBridge::Create(int media_keys_id,
- const std::vector<uint8>& uuid,
- MediaPlayerManager* manager) {
- if (!IsAvailable())
- return NULL;
+bool MediaDrmBridge::IsSecurityLevelSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level) {
+ // Pass 0 as |media_keys_id| and NULL as |manager| as they are not used in
+ // creation time of MediaDrmBridge.
+ return MediaDrmBridge::Create(0, scheme_uuid, GURL(), security_level, NULL) !=
+ NULL;
+}
- // TODO(qinmin): check whether the uuid is valid.
- return new MediaDrmBridge(media_keys_id, uuid, manager);
+bool MediaDrmBridge::IsCryptoSchemeSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& container_mime_type) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_container_mime_type =
+ ConvertUTF8ToJavaString(env, container_mime_type);
+ return Java_MediaDrmBridge_isCryptoSchemeSupported(
+ env, j_scheme_uuid.obj(), j_container_mime_type.obj());
+}
+
+bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
+ return RegisterNativesImpl(env);
}
MediaDrmBridge::MediaDrmBridge(int media_keys_id,
- const std::vector<uint8>& uuid,
+ const std::vector<uint8>& scheme_uuid,
+ const GURL& frame_url,
+ const std::string& security_level,
MediaPlayerManager* manager)
- : media_keys_id_(media_keys_id), uuid_(uuid), manager_(manager) {
- // TODO(qinmin): pass the uuid to DRM engine.
+ : media_keys_id_(media_keys_id),
+ scheme_uuid_(scheme_uuid),
+ frame_url_(frame_url),
+ manager_(manager) {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_security_level =
+ ConvertUTF8ToJavaString(env, security_level);
+ j_media_drm_.Reset(Java_MediaDrmBridge_create(
+ env, j_scheme_uuid.obj(), j_security_level.obj(),
+ reinterpret_cast<intptr_t>(this)));
}
-MediaDrmBridge::~MediaDrmBridge() {}
+MediaDrmBridge::~MediaDrmBridge() {
+ JNIEnv* env = AttachCurrentThread();
+ if (!j_media_drm_.is_null())
+ Java_MediaDrmBridge_release(env, j_media_drm_.obj());
+}
bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
const uint8* init_data,
int init_data_length) {
std::vector<uint8> pssh_data;
- if (!GetPsshData(init_data, init_data_length, uuid_, &pssh_data))
+ if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
return false;
- NOTIMPLEMENTED();
- return false;
-}
-
-void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
- NOTIMPLEMENTED();
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_pssh_data =
+ base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, type);
+ Java_MediaDrmBridge_generateKeyRequest(
+ env, j_media_drm_.obj(), j_pssh_data.obj(), j_mime.obj());
+ return true;
}
void MediaDrmBridge::AddKey(const uint8* key, int key_length,
const uint8* init_data, int init_data_length,
const std::string& session_id) {
- NOTIMPLEMENTED();
+ DVLOG(1) << __FUNCTION__;
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_key_data =
+ base::android::ToJavaByteArray(env, key, key_length);
+ ScopedJavaLocalRef<jstring> j_session_id =
+ ConvertUTF8ToJavaString(env, session_id);
+ Java_MediaDrmBridge_addKey(
+ env, j_media_drm_.obj(), j_session_id.obj(), j_key_data.obj());
}
-ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
- NOTIMPLEMENTED();
- return ScopedJavaLocalRef<jobject>();
+void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> j_session_id =
+ ConvertUTF8ToJavaString(env, session_id);
+ Java_MediaDrmBridge_cancelKeyRequest(
+ env, j_media_drm_.obj(), j_session_id.obj());
+}
+
+void MediaDrmBridge::SetMediaCryptoReadyCB(const base::Closure& closure) {
+ if (closure.is_null()) {
+ media_crypto_ready_cb_.Reset();
+ return;
+ }
+
+ DCHECK(media_crypto_ready_cb_.is_null());
+
+ if (!GetMediaCrypto().is_null()) {
+ base::MessageLoopProxy::current()->PostTask(FROM_HERE, closure);
+ return;
+ }
+
+ media_crypto_ready_cb_ = closure;
+}
+
+void MediaDrmBridge::OnMediaCryptoReady(JNIEnv* env, jobject) {
+ DCHECK(!GetMediaCrypto().is_null());
+ if (!media_crypto_ready_cb_.is_null())
+ base::ResetAndReturn(&media_crypto_ready_cb_).Run();
}
void MediaDrmBridge::OnKeyMessage(JNIEnv* env,
@@ -197,13 +310,51 @@ void MediaDrmBridge::OnKeyMessage(JNIEnv* env,
manager_->OnKeyMessage(media_keys_id_, session_id, message, destination_url);
}
-void MediaDrmBridge::OnDrmEvent(JNIEnv* env,
- jobject j_media_drm,
- jstring session_id,
- jint event,
- jint extra,
- jstring data) {
- NOTIMPLEMENTED();
+void MediaDrmBridge::OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id) {
+ std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ manager_->OnKeyAdded(media_keys_id_, session_id);
+}
+
+void MediaDrmBridge::OnKeyError(JNIEnv* env, jobject, jstring j_session_id) {
+ // |j_session_id| can be NULL, in which case we'll return an empty string.
+ std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ manager_->OnKeyError(media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+}
+
+ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaDrmBridge_getMediaCrypto(env, j_media_drm_.obj());
+}
+
+// static
+bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
+ return MediaDrmBridge::SECURITY_LEVEL_1 == security_level;
+}
+
+MediaDrmBridge::SecurityLevel MediaDrmBridge::GetSecurityLevel() {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> j_security_level =
+ Java_MediaDrmBridge_getSecurityLevel(env, j_media_drm_.obj());
+ std::string security_level_str =
+ ConvertJavaStringToUTF8(env, j_security_level.obj());
+ return GetSecurityLevelFromString(security_level_str);
+}
+
+bool MediaDrmBridge::IsProtectedSurfaceRequired() {
+ return IsSecureDecoderRequired(GetSecurityLevel());
+}
+
+void MediaDrmBridge::ResetDeviceCredentials(
+ const ResetCredentialsCB& callback) {
+ DCHECK(reset_credentials_cb_.is_null());
+ reset_credentials_cb_ = callback;
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaDrmBridge_resetDeviceCredentials(env, j_media_drm_.obj());
+}
+
+void MediaDrmBridge::OnResetDeviceCredentialsCompleted(
+ JNIEnv* env, jobject, bool success) {
+ base::ResetAndReturn(&reset_credentials_cb_).Run(success);
}
} // namespace media
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 42b67909ca7..0399beb1d7d 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -10,8 +10,13 @@
#include <vector>
#include "base/android/scoped_java_ref.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/media_keys.h"
+#include "url/gurl.h"
+
+class GURL;
namespace media {
@@ -21,17 +26,38 @@ class MediaPlayerManager;
// TODO(qinmin): implement all the functions in this class.
class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
public:
+ enum SecurityLevel {
+ SECURITY_LEVEL_NONE = 0,
+ SECURITY_LEVEL_1 = 1,
+ SECURITY_LEVEL_3 = 3,
+ };
+
+ typedef base::Callback<void(bool)> ResetCredentialsCB;
+
virtual ~MediaDrmBridge();
- // Returns a MediaDrmBridge instance if |uuid| is supported, or a NULL
+ // Returns a MediaDrmBridge instance if |scheme_uuid| is supported, or a NULL
// pointer otherwise.
- static MediaDrmBridge* Create(int media_keys_id,
- const std::vector<uint8>& uuid,
- MediaPlayerManager* manager);
-
- // Checks whether DRM is available.
+ static scoped_ptr<MediaDrmBridge> Create(
+ int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ const GURL& frame_url,
+ const std::string& security_level,
+ MediaPlayerManager* manager);
+
+ // Checks whether MediaDRM is available.
static bool IsAvailable();
+ static bool IsSecurityLevelSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level);
+
+ static bool IsCryptoSchemeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& container_mime_type);
+
+ static bool IsSecureDecoderRequired(const std::string& security_level_str);
+
+ static bool RegisterMediaDrmBridge(JNIEnv* env);
+
// MediaKeys implementations.
virtual bool GenerateKeyRequest(const std::string& type,
const uint8* init_data,
@@ -41,33 +67,78 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
const std::string& session_id) OVERRIDE;
virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
- // Drm related message was received.
- void OnDrmEvent(JNIEnv* env, jobject, jstring session_id,
- jint event, jint extra, jstring data);
+ // Returns a MediaCrypto object if it's already created. Returns a null object
+ // otherwise.
+ base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+
+ // Sets callback which will be called when MediaCrypto is ready.
+ // If |closure| is null, previously set callback will be cleared.
+ void SetMediaCryptoReadyCB(const base::Closure& closure);
+
+ // Called after a MediaCrypto object is created.
+ void OnMediaCryptoReady(JNIEnv* env, jobject);
// Called after we got the response for GenerateKeyRequest().
- void OnKeyMessage(JNIEnv* env, jobject, jstring session_id,
+ void OnKeyMessage(JNIEnv* env, jobject, jstring j_session_id,
jbyteArray message, jstring destination_url);
- // Methods to create and release a MediaCrypto object.
- base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+ // Called when key is added.
+ void OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id);
+
+ // Called when error happens.
+ void OnKeyError(JNIEnv* env, jobject, jstring j_session_id);
+
+ // Reset the device credentials.
+ void ResetDeviceCredentials(const ResetCredentialsCB& callback);
+
+ // Called by the java object when credential reset is completed.
+ void OnResetDeviceCredentialsCompleted(JNIEnv* env, jobject, bool success);
+
+ // Helper function to determine whether a protected surface is needed for the
+ // video playback.
+ bool IsProtectedSurfaceRequired();
int media_keys_id() const { return media_keys_id_; }
+ GURL frame_url() const { return frame_url_; }
+
+ static void set_can_use_media_drm(bool can_use_media_drm) {
+ can_use_media_drm_ = can_use_media_drm;
+ }
+
private:
+ static bool IsSecureDecoderRequired(SecurityLevel security_level);
+
+ static bool can_use_media_drm_;
+
MediaDrmBridge(int media_keys_id,
- const std::vector<uint8>& uuid,
+ const std::vector<uint8>& scheme_uuid,
+ const GURL& frame_url,
+ const std::string& security_level,
MediaPlayerManager* manager);
- // Id of the MediaKeys object.
+ // Get the security level of the media.
+ SecurityLevel GetSecurityLevel();
+
+ // ID of the MediaKeys object.
int media_keys_id_;
// UUID of the key system.
- std::vector<uint8> uuid_;
+ std::vector<uint8> scheme_uuid_;
+
+ // media stream's frame URL.
+ const GURL frame_url_;
+
+ // Java MediaDrm instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_media_drm_;
// Non-owned pointer.
MediaPlayerManager* manager_;
+ base::Closure media_crypto_ready_cb_;
+
+ ResetCredentialsCB reset_credentials_cb_;
+
DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
};
diff --git a/chromium/media/base/android/media_jni_registrar.cc b/chromium/media/base/android/media_jni_registrar.cc
deleted file mode 100644
index 93a46c3b545..00000000000
--- a/chromium/media/base/android/media_jni_registrar.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/android/media_jni_registrar.h"
-
-#include "base/basictypes.h"
-#include "base/android/jni_android.h"
-#include "base/android/jni_registrar.h"
-
-#include "media/audio/android/audio_manager_android.h"
-#include "media/base/android/media_codec_bridge.h"
-#include "media/base/android/media_player_bridge.h"
-#include "media/base/android/media_player_listener.h"
-#include "media/base/android/webaudio_media_codec_bridge.h"
-#include "media/video/capture/android/video_capture_device_android.h"
-
-namespace media {
-
-static base::android::RegistrationMethod kMediaRegisteredMethods[] = {
- { "AudioManagerAndroid",
- AudioManagerAndroid::RegisterAudioManager },
- { "MediaCodecBridge",
- MediaCodecBridge::RegisterMediaCodecBridge },
- { "MediaPlayerBridge",
- MediaPlayerBridge::RegisterMediaPlayerBridge },
- { "MediaPlayerListener",
- MediaPlayerListener::RegisterMediaPlayerListener },
- { "VideoCaptureDevice",
- VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice },
- { "WebAudioMediaCodecBridge",
- WebAudioMediaCodecBridge::RegisterWebAudioMediaCodecBridge },
-};
-
-bool RegisterJni(JNIEnv* env) {
- return base::android::RegisterNativeMethods(
- env, kMediaRegisteredMethods, arraysize(kMediaRegisteredMethods));
-}
-
-} // namespace media
diff --git a/chromium/media/base/android/media_jni_registrar.h b/chromium/media/base/android/media_jni_registrar.h
deleted file mode 100644
index 7e937028f81..00000000000
--- a/chromium/media/base/android/media_jni_registrar.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
-#define MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
-
-#include <jni.h>
-
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Register all JNI bindings necessary for media.
-MEDIA_EXPORT bool RegisterJni(JNIEnv* env);
-
-} // namespace media
-
-#endif // MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
index c0055069e69..101ab436df1 100644
--- a/chromium/media/base/android/media_player_android.cc
+++ b/chromium/media/base/android/media_player_android.cc
@@ -63,24 +63,6 @@ void MediaPlayerAndroid::ReleaseMediaResourcesFromManager() {
manager_->ReleaseMediaResources(player_id_);
}
-void MediaPlayerAndroid::DemuxerReady(
- const MediaPlayerHostMsg_DemuxerReady_Params& params) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::ReadFromDemuxerAck(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::OnSeekRequestAck(unsigned seek_request_id) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
-void MediaPlayerAndroid::DurationChanged(const base::TimeDelta& duration) {
- NOTREACHED() << "Unexpected ipc received";
-}
-
GURL MediaPlayerAndroid::GetUrl() {
return GURL();
}
@@ -94,4 +76,9 @@ void MediaPlayerAndroid::SetDrmBridge(MediaDrmBridge* drm_bridge) {
return;
}
+void MediaPlayerAndroid::OnKeyAdded() {
+ // Not all players care about the decryption key. Do nothing by default.
+ return;
+}
+
} // namespace media
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
index f1c9c37ee09..0968d3513f9 100644
--- a/chromium/media/base/android/media_player_android.h
+++ b/chromium/media/base/android/media_player_android.h
@@ -10,7 +10,6 @@
#include "base/callback.h"
#include "base/time/time.h"
-#include "media/base/android/demuxer_stream_player_params.h"
#include "media/base/media_export.h"
#include "ui/gl/android/scoped_java_surface.h"
#include "url/gurl.h"
@@ -35,25 +34,6 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MEDIA_ERROR_INVALID_CODE,
};
- // Types of media source that this object will play.
- enum SourceType {
- SOURCE_TYPE_URL,
- SOURCE_TYPE_MSE, // W3C Media Source Extensions
- SOURCE_TYPE_STREAM, // W3C Media Stream, e.g. getUserMedia().
- };
-
- // Construct a MediaPlayerAndroid object with all the needed media player
- // callbacks. This object needs to call |manager_|'s RequestMediaResources()
- // before decoding the media stream. This allows |manager_| to track
- // unused resources and free them when needed. On the other hand, it needs
- // to call ReleaseMediaResources() when it is done with decoding.
- static MediaPlayerAndroid* Create(int player_id,
- const GURL& url,
- SourceType source_type,
- const GURL& first_party_for_cookies,
- bool hide_url_log,
- MediaPlayerManager* manager);
-
// Passing an external java surface object to the player.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
@@ -61,7 +41,7 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual void Start() = 0;
// Pause the media.
- virtual void Pause() = 0;
+ virtual void Pause(bool is_media_related_action) = 0;
// Seek to a particular position. When succeeds, OnSeekComplete() will be
// called. Otherwise, nothing will happen.
@@ -86,23 +66,13 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual GURL GetUrl();
virtual GURL GetFirstPartyForCookies();
- // Methods for DemuxerStreamPlayer.
- // Informs DemuxerStreamPlayer that the demuxer is ready.
- virtual void DemuxerReady(
- const MediaPlayerHostMsg_DemuxerReady_Params& params);
- // Called when the requested data is received from the demuxer.
- virtual void ReadFromDemuxerAck(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params);
-
- // Called when a seek request is acked by the render process.
- virtual void OnSeekRequestAck(unsigned seek_request_id);
-
- // Called when the demuxer has changed the duration.
- virtual void DurationChanged(const base::TimeDelta& duration);
-
// Pass a drm bridge to a player.
virtual void SetDrmBridge(MediaDrmBridge* drm_bridge);
+ // Notifies the player that a decryption key has been added. The player
+ // may want to start/resume playback if it is waiting for a key.
+ virtual void OnKeyAdded();
+
int player_id() { return player_id_; }
protected:
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index 342ceaa7902..f570bdc8677 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -12,7 +12,6 @@
#include "jni/MediaPlayerBridge_jni.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_resource_getter.h"
-#include "media/base/android/media_source_player.h"
using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
@@ -20,41 +19,8 @@ using base::android::ScopedJavaLocalRef;
// Time update happens every 250ms.
static const int kTimeUpdateInterval = 250;
-// Android MediaMetadataRetriever may fail to extract the metadata from the
-// media under some circumstances. This makes the user unable to perform
-// seek. To solve this problem, we use a temporary duration of 100 seconds when
-// the duration is unknown. And we scale the seek position later when duration
-// is available.
-static const int kTemporaryDuration = 100;
-
namespace media {
-#if !defined(GOOGLE_TV)
-// static
-MediaPlayerAndroid* MediaPlayerAndroid::Create(
- int player_id,
- const GURL& url,
- SourceType source_type,
- const GURL& first_party_for_cookies,
- bool hide_url_log,
- MediaPlayerManager* manager) {
- if (source_type == SOURCE_TYPE_URL) {
- MediaPlayerBridge* media_player_bridge = new MediaPlayerBridge(
- player_id,
- url,
- first_party_for_cookies,
- hide_url_log,
- manager);
- media_player_bridge->Initialize();
- return media_player_bridge;
- } else {
- return new MediaSourcePlayer(
- player_id,
- manager);
- }
-}
-#endif
-
MediaPlayerBridge::MediaPlayerBridge(
int player_id,
const GURL& url,
@@ -68,7 +34,6 @@ MediaPlayerBridge::MediaPlayerBridge(
url_(url),
first_party_for_cookies_(first_party_for_cookies),
hide_url_log_(hide_url_log),
- duration_(base::TimeDelta::FromSeconds(kTemporaryDuration)),
width_(0),
height_(0),
can_pause_(true),
@@ -216,7 +181,7 @@ void MediaPlayerBridge::Start() {
}
}
-void MediaPlayerBridge::Pause() {
+void MediaPlayerBridge::Pause(bool is_media_related_action) {
if (j_media_player_bridge_.is_null()) {
pending_play_ = false;
} else {
@@ -331,16 +296,8 @@ void MediaPlayerBridge::OnMediaPrepared() {
return;
prepared_ = true;
-
- base::TimeDelta dur = duration_;
duration_ = GetDuration();
- if (duration_ != dur && 0 != dur.InMilliseconds()) {
- // Scale the |pending_seek_| according to the new duration.
- pending_seek_ = base::TimeDelta::FromSeconds(
- pending_seek_.InSecondsF() * duration_.InSecondsF() / dur.InSecondsF());
- }
-
// If media player was recovered from a saved state, consume all the pending
// events.
PendingSeekInternal(pending_seek_);
@@ -402,7 +359,6 @@ void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
-
int time_msec = static_cast<int>(time.InMilliseconds());
Java_MediaPlayerBridge_seekTo(
env, j_media_player_bridge_.obj(), time_msec);
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index 85a29604058..7bd4beb082f 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -55,7 +55,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// MediaPlayerAndroid implementation.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
- virtual void Pause() OVERRIDE;
+ virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
virtual void SeekTo(base::TimeDelta time) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
index a0f5017c13f..4ecac22518b 100644
--- a/chromium/media/base/android/media_player_manager.h
+++ b/chromium/media/base/android/media_player_manager.h
@@ -14,10 +14,6 @@
#include "media/base/media_export.h"
#include "media/base/media_keys.h"
-namespace content {
-class RenderViewHost;
-}
-
namespace media {
class MediaDrmBridge;
@@ -25,25 +21,8 @@ class MediaPlayerAndroid;
class MediaResourceGetter;
// This class is responsible for managing active MediaPlayerAndroid objects.
-// Objects implementing this interface a created via
-// MediaPlayerManager::Create(), allowing embedders to provide their
-// implementation.
class MEDIA_EXPORT MediaPlayerManager {
public:
- // The type of the factory function that returns a new instance of the
- // MediaPlayerManager implementation.
- typedef MediaPlayerManager* (*FactoryFunction)(content::RenderViewHost*);
-
- // Allows to override the default factory function in order to provide
- // a custom implementation to the RenderViewHost instance.
- // Must be called from the main thread.
- static void RegisterFactoryFunction(FactoryFunction factory_function);
-
- // Returns a new instance of MediaPlayerManager interface implementation.
- // The returned object is owned by the caller. Must be called on the main
- // thread.
- static MediaPlayerManager* Create(content::RenderViewHost* render_view_host);
-
virtual ~MediaPlayerManager() {}
// Called by a MediaPlayerAndroid object when it is going to decode
@@ -100,17 +79,6 @@ class MEDIA_EXPORT MediaPlayerManager {
// Release all the players managed by this object.
virtual void DestroyAllMediaPlayers() = 0;
- // Callback when DemuxerStreamPlayer wants to read data from the demuxer.
- virtual void OnReadFromDemuxer(int player_id,
- media::DemuxerStream::Type type) = 0;
-
- // Called when player wants the media element to initiate a seek.
- virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
- unsigned seek_request_id) = 0;
-
- // Called when player wants to read the config data from the demuxer.
- virtual void OnMediaConfigRequest(int player_id) = 0;
-
// Get the MediaDrmBridge object for the given media key Id.
virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
index 3af8a2b0d3c..223515e9992 100644
--- a/chromium/media/base/android/media_source_player.cc
+++ b/chromium/media/base/android/media_source_player.cc
@@ -4,330 +4,66 @@
#include "media/base/android/media_source_player.h"
+#include <limits>
+
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
+#include "base/barrier_closure.h"
#include "base/basictypes.h"
#include "base/bind.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/thread.h"
-#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/audio_decoder_job.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
+#include "media/base/android/video_decoder_job.h"
#include "media/base/audio_timestamp_helper.h"
namespace {
-// Timeout value for media codec operations. Because the first
-// DequeInputBuffer() can take about 150 milliseconds, use 250 milliseconds
-// here. See b/9357571.
-const int kMediaCodecTimeoutInMilliseconds = 250;
-
// Use 16bit PCM for audio output. Keep this value in sync with the output
// format we passed to AudioTrack in MediaCodecBridge.
const int kBytesPerAudioOutputSample = 2;
-
-class DecoderThread : public base::Thread {
- public:
- virtual ~DecoderThread() {}
- protected:
- DecoderThread(const char* name) : base::Thread(name) { Start(); }
-};
-
-class AudioDecoderThread : public DecoderThread {
- public:
- AudioDecoderThread() : DecoderThread("MediaSource_AudioDecoderThread") {}
-};
-
-class VideoDecoderThread : public DecoderThread {
- public:
- VideoDecoderThread() : DecoderThread("MediaSource_VideoDecoderThread") {}
-};
-
-// TODO(qinmin): Check if it is tolerable to use worker pool to handle all the
-// decoding tasks so that we don't need the global threads here.
-// http://crbug.com/245750
-base::LazyInstance<AudioDecoderThread>::Leaky
- g_audio_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-
-base::LazyInstance<VideoDecoderThread>::Leaky
- g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-
}
namespace media {
-MediaDecoderJob::MediaDecoderJob(
- const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
- MediaCodecBridge* media_codec_bridge,
- bool is_audio)
- : ui_loop_(base::MessageLoopProxy::current()),
- decoder_loop_(decoder_loop),
- media_codec_bridge_(media_codec_bridge),
- needs_flush_(false),
- is_audio_(is_audio),
- input_eos_encountered_(false),
- weak_this_(this),
- is_decoding_(false) {
-}
-
-MediaDecoderJob::~MediaDecoderJob() {}
-
-// Class for managing audio decoding jobs.
-class AudioDecoderJob : public MediaDecoderJob {
- public:
- virtual ~AudioDecoderJob() {}
-
- static AudioDecoderJob* Create(
- const AudioCodec audio_codec, int sample_rate, int channel_count,
- const uint8* extra_data, size_t extra_data_size, jobject media_crypto);
-
- void SetVolume(double volume);
-
- private:
- AudioDecoderJob(MediaCodecBridge* media_codec_bridge);
-};
-
-// Class for managing video decoding jobs.
-class VideoDecoderJob : public MediaDecoderJob {
- public:
- virtual ~VideoDecoderJob() {}
-
- static VideoDecoderJob* Create(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface,
- jobject media_crypto);
-
- private:
- VideoDecoderJob(MediaCodecBridge* media_codec_bridge);
-};
-
-void MediaDecoderJob::Decode(
- const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback) {
- DCHECK(!is_decoding_);
- DCHECK(ui_loop_->BelongsToCurrentThread());
- is_decoding_ = true;
- decoder_loop_->PostTask(FROM_HERE, base::Bind(
- &MediaDecoderJob::DecodeInternal, base::Unretained(this), unit,
- start_time_ticks, start_presentation_timestamp, needs_flush_,
- callback));
- needs_flush_ = false;
-}
-
-MediaDecoderJob::DecodeStatus MediaDecoderJob::QueueInputBuffer(
- const AccessUnit& unit) {
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
- kMediaCodecTimeoutInMilliseconds);
- int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
- if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR)
- return DECODE_FAILED;
- if (input_buf_index == MediaCodecBridge::INFO_TRY_AGAIN_LATER)
- return DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER;
-
- // TODO(qinmin): skip frames if video is falling far behind.
- DCHECK(input_buf_index >= 0);
- if (unit.end_of_stream || unit.data.empty()) {
- media_codec_bridge_->QueueEOS(input_buf_index);
- return DECODE_INPUT_END_OF_STREAM;
- }
- if (unit.key_id.empty()) {
- media_codec_bridge_->QueueInputBuffer(
- input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
- } else {
- if (unit.iv.empty() || unit.subsamples.empty()) {
- LOG(ERROR) << "The access unit doesn't have iv or subsamples while it "
- << "has key IDs!";
- return DECODE_FAILED;
- }
- media_codec_bridge_->QueueSecureInputBuffer(
- input_buf_index, &unit.data[0], unit.data.size(),
- reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
- reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
- &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
- }
-
- return DECODE_SUCCEEDED;
-}
-
-void MediaDecoderJob::DecodeInternal(
- const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- bool needs_flush,
- const MediaDecoderJob::DecoderCallback& callback) {
- if (needs_flush) {
- DVLOG(1) << "DecodeInternal needs flush.";
- input_eos_encountered_ = false;
- int reset_status = media_codec_bridge_->Reset();
- if (0 != reset_status) {
- ui_loop_->PostTask(FROM_HERE, base::Bind(
- callback, DECODE_FAILED, start_presentation_timestamp, 0));
- return;
+// static
+bool MediaSourcePlayer::IsTypeSupported(
+ const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs) {
+ if (!MediaDrmBridge::IsCryptoSchemeSupported(scheme_uuid, container)) {
+ DVLOG(1) << "UUID and container '" << container << "' not supported.";
+ return false;
+ }
+
+ if (!MediaDrmBridge::IsSecurityLevelSupported(scheme_uuid, security_level)) {
+ DVLOG(1) << "UUID and security level '" << security_level
+ << "' not supported.";
+ return false;
+ }
+
+ bool is_secure = MediaDrmBridge::IsSecureDecoderRequired(security_level);
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ if (!MediaCodecBridge::CanDecode(codecs[i], is_secure)) {
+ DVLOG(1) << "Codec '" << codecs[i] << "' "
+ << (is_secure ? "in secure mode " : "") << "not supported.";
+ return false;
}
}
- DecodeStatus decode_status = DECODE_INPUT_END_OF_STREAM;
- if (!input_eos_encountered_) {
- decode_status = QueueInputBuffer(unit);
- if (decode_status == DECODE_INPUT_END_OF_STREAM) {
- input_eos_encountered_ = true;
- } else if (decode_status != DECODE_SUCCEEDED) {
- ui_loop_->PostTask(FROM_HERE,
- base::Bind(callback, decode_status,
- start_presentation_timestamp, 0));
- return;
- }
- }
-
- size_t offset = 0;
- size_t size = 0;
- base::TimeDelta presentation_timestamp;
- bool end_of_stream = false;
-
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
- kMediaCodecTimeoutInMilliseconds);
- int outputBufferIndex = media_codec_bridge_->DequeueOutputBuffer(
- timeout, &offset, &size, &presentation_timestamp, &end_of_stream);
-
- if (end_of_stream)
- decode_status = DECODE_OUTPUT_END_OF_STREAM;
- switch (outputBufferIndex) {
- case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
- DCHECK(decode_status != DECODE_INPUT_END_OF_STREAM);
- media_codec_bridge_->GetOutputBuffers();
- break;
- case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
- DCHECK(decode_status != DECODE_INPUT_END_OF_STREAM);
- // TODO(qinmin): figure out what we should do if format changes.
- decode_status = DECODE_FORMAT_CHANGED;
- break;
- case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
- decode_status = DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER;
- break;
- case MediaCodecBridge::INFO_MEDIA_CODEC_ERROR:
- decode_status = DECODE_FAILED;
- break;
- default:
- DCHECK_LE(0, outputBufferIndex);
- base::TimeDelta time_to_render;
- DCHECK(!start_time_ticks.is_null());
- if (!is_audio_) {
- time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
- start_time_ticks + start_presentation_timestamp);
- }
- if (time_to_render >= base::TimeDelta()) {
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
- weak_this_.GetWeakPtr(), outputBufferIndex, size,
- presentation_timestamp, callback, decode_status),
- time_to_render);
- } else {
- // TODO(qinmin): The codec is lagging behind, need to recalculate the
- // |start_presentation_timestamp_| and |start_time_ticks_|.
- DVLOG(1) << (is_audio_ ? "audio " : "video ")
- << "codec is lagging behind :" << time_to_render.InMicroseconds();
- ReleaseOutputBuffer(outputBufferIndex, size, presentation_timestamp,
- callback, decode_status);
- }
- return;
- }
- ui_loop_->PostTask(FROM_HERE, base::Bind(
- callback, decode_status, start_presentation_timestamp, 0));
-}
-
-void MediaDecoderJob::ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback, DecodeStatus status) {
- // TODO(qinmin): Refactor this function. Maybe AudioDecoderJob should provide
- // its own ReleaseOutputBuffer().
- if (is_audio_) {
- static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->PlayOutputBuffer(
- outputBufferIndex, size);
- }
- if (status != DECODE_OUTPUT_END_OF_STREAM || size != 0u)
- media_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, !is_audio_);
- ui_loop_->PostTask(FROM_HERE, base::Bind(
- callback, status, presentation_timestamp, is_audio_ ? size : 0));
-}
-
-void MediaDecoderJob::OnDecodeCompleted() {
- DCHECK(ui_loop_->BelongsToCurrentThread());
- is_decoding_ = false;
-}
-
-void MediaDecoderJob::Flush() {
- // Do nothing, flush when the next Decode() happens.
- needs_flush_ = true;
-}
-
-void MediaDecoderJob::Release() {
- // If |decoding_| is false, there is nothing running on the decoder thread.
- // So it is safe to delete the MediaDecoderJob on the UI thread. However,
- // if we post a task to the decoder thread to delete object, then we cannot
- // immediately pass the surface to a new MediaDecoderJob instance because
- // the java surface is still owned by the old object. New decoder creation
- // will be blocked on the UI thread until the previous decoder gets deleted.
- // This introduces extra latency during config changes, and makes the logic in
- // MediaSourcePlayer more complicated.
- //
- // TODO(qinmin): Figure out the logic to passing the surface to a new
- // MediaDecoderJob instance after the previous one gets deleted on the decoder
- // thread.
- if (is_decoding_ && !decoder_loop_->BelongsToCurrentThread()) {
- DCHECK(ui_loop_->BelongsToCurrentThread());
- decoder_loop_->DeleteSoon(FROM_HERE, this);
- } else {
- delete this;
- }
-}
-
-VideoDecoderJob* VideoDecoderJob::Create(
- const VideoCodec video_codec, const gfx::Size& size, jobject surface,
- jobject media_crypto) {
- scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::Create(video_codec));
- if (codec && codec->Start(video_codec, size, surface, media_crypto))
- return new VideoDecoderJob(codec.release());
- return NULL;
-}
-
-VideoDecoderJob::VideoDecoderJob(MediaCodecBridge* media_codec_bridge)
- : MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
- media_codec_bridge,
- false) {}
-
-AudioDecoderJob* AudioDecoderJob::Create(
- const AudioCodec audio_codec,
- int sample_rate,
- int channel_count,
- const uint8* extra_data,
- size_t extra_data_size,
- jobject media_crypto) {
- scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
- if (codec && codec->Start(audio_codec, sample_rate, channel_count, extra_data,
- extra_data_size, true, media_crypto)) {
- return new AudioDecoderJob(codec.release());
- }
- return NULL;
-}
-
-AudioDecoderJob::AudioDecoderJob(MediaCodecBridge* media_codec_bridge)
- : MediaDecoderJob(g_audio_decoder_thread.Pointer()->message_loop_proxy(),
- media_codec_bridge,
- true) {}
-
-void AudioDecoderJob::SetVolume(double volume) {
- static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->SetVolume(volume);
+ return true;
}
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
- MediaPlayerManager* manager)
+ MediaPlayerManager* manager,
+ int demuxer_client_id,
+ DemuxerAndroid* demuxer)
: MediaPlayerAndroid(player_id, manager),
+ demuxer_client_id_(demuxer_client_id),
+ demuxer_(demuxer),
pending_event_(NO_EVENT_PENDING),
seek_request_id_(0),
width_(0),
@@ -345,35 +81,54 @@ MediaSourcePlayer::MediaSourcePlayer(
clock_(&default_tick_clock_),
reconfig_audio_decoder_(false),
reconfig_video_decoder_(false),
- audio_access_unit_index_(0),
- video_access_unit_index_(0),
- waiting_for_audio_data_(false),
- waiting_for_video_data_(false),
- sync_decoder_jobs_(true),
weak_this_(this),
- drm_bridge_(NULL) {
+ drm_bridge_(NULL),
+ is_waiting_for_key_(false) {
+ demuxer_->AddDemuxerClient(demuxer_client_id_, this);
}
MediaSourcePlayer::~MediaSourcePlayer() {
+ demuxer_->RemoveDemuxerClient(demuxer_client_id_);
Release();
}
void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
- // Ignore non-empty surface that is unprotected if |is_video_encrypted_| is
- // true.
- if (is_video_encrypted_ && !surface.IsEmpty() && !surface.is_protected())
+ // For an empty surface, always pass it to the decoder job so that it
+ // can detach from the current one. Otherwise, don't pass an unprotected
+ // surface if the video content requires a protected one.
+ if (!surface.IsEmpty() &&
+ IsProtectedSurfaceRequired() && !surface.is_protected()) {
return;
+ }
surface_ = surface.Pass();
- pending_event_ |= SURFACE_CHANGE_EVENT_PENDING;
- if (pending_event_ & SEEK_EVENT_PENDING) {
+
+ // If there is a pending surface change event, just wait for it to be
+ // processed.
+ if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
+ return;
+ SetPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+ if (IsEventPending(SEEK_EVENT_PENDING)) {
// Waiting for the seek to finish.
return;
}
+
// Setting a new surface will require a new MediaCodec to be created.
// Request a seek so that the new decoder will decode an I-frame first.
// Or otherwise, the new MediaCodec might crash. See b/8950387.
- pending_event_ |= SEEK_EVENT_PENDING;
+ ScheduleSeekEventAndStopDecoding();
+}
+
+void MediaSourcePlayer::ScheduleSeekEventAndStopDecoding() {
+ if (audio_decoder_job_ && audio_decoder_job_->is_decoding())
+ audio_decoder_job_->StopDecode();
+ if (video_decoder_job_ && video_decoder_job_->is_decoding())
+ video_decoder_job_->StopDecode();
+
+ if (IsEventPending(SEEK_EVENT_PENDING))
+ return;
+
+ SetPendingEvent(SEEK_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -387,15 +142,19 @@ bool MediaSourcePlayer::Seekable() {
}
void MediaSourcePlayer::Start() {
+ DVLOG(1) << __FUNCTION__;
+
playing_ = true;
- if (is_video_encrypted_)
+ if (IsProtectedSurfaceRequired())
manager()->OnProtectedSurfaceRequested(player_id());
StartInternal();
}
-void MediaSourcePlayer::Pause() {
+void MediaSourcePlayer::Pause(bool is_media_related_action) {
+ DVLOG(1) << __FUNCTION__;
+
// Since decoder jobs have their own thread, decoding is not fully paused
// until all the decoder jobs call MediaDecoderCallback(). It is possible
// that Start() is called while the player is waiting for
@@ -418,11 +177,12 @@ int MediaSourcePlayer::GetVideoHeight() {
}
void MediaSourcePlayer::SeekTo(base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
+
clock_.SetTime(timestamp, timestamp);
if (audio_timestamp_helper_)
audio_timestamp_helper_->SetBaseTimestamp(timestamp);
- pending_event_ |= SEEK_EVENT_PENDING;
- ProcessPendingEvents();
+ ScheduleSeekEventAndStopDecoding();
}
base::TimeDelta MediaSourcePlayer::GetCurrentTime() {
@@ -434,13 +194,14 @@ base::TimeDelta MediaSourcePlayer::GetDuration() {
}
void MediaSourcePlayer::Release() {
- ClearDecodingData();
+ DVLOG(1) << __FUNCTION__;
audio_decoder_job_.reset();
video_decoder_job_.reset();
reconfig_audio_decoder_ = false;
reconfig_video_decoder_ = false;
playing_ = false;
pending_event_ = NO_EVENT_PENDING;
+ decoder_starvation_callback_.Cancel();
surface_ = gfx::ScopedJavaSurface();
ReleaseMediaResourcesFromManager();
}
@@ -450,6 +211,16 @@ void MediaSourcePlayer::SetVolume(double volume) {
SetVolumeInternal();
}
+void MediaSourcePlayer::OnKeyAdded() {
+ DVLOG(1) << __FUNCTION__;
+ if (!is_waiting_for_key_)
+ return;
+
+ is_waiting_for_key_ = false;
+ if (playing_)
+ StartInternal();
+}
+
bool MediaSourcePlayer::CanPause() {
return Seekable();
}
@@ -467,15 +238,20 @@ bool MediaSourcePlayer::IsPlayerReady() {
}
void MediaSourcePlayer::StartInternal() {
+ DVLOG(1) << __FUNCTION__;
// If there are pending events, wait for them finish.
if (pending_event_ != NO_EVENT_PENDING)
return;
+ // When we start, we'll have new demuxed data coming in. This new data could
+ // be clear (not encrypted) or encrypted with different keys. So
+ // |is_waiting_for_key_| condition may not be true anymore.
+ is_waiting_for_key_ = false;
+
// Create decoder jobs if they are not created
ConfigureAudioDecoderJob();
ConfigureVideoDecoderJob();
-
// If one of the decoder job is not ready, do nothing.
if ((HasAudio() && !audio_decoder_job_) ||
(HasVideo() && !video_decoder_job_)) {
@@ -484,20 +260,21 @@ void MediaSourcePlayer::StartInternal() {
audio_finished_ = false;
video_finished_ = false;
- sync_decoder_jobs_ = true;
- SyncAndStartDecoderJobs();
+ SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
+ ProcessPendingEvents();
}
-void MediaSourcePlayer::DemuxerReady(
- const MediaPlayerHostMsg_DemuxerReady_Params& params) {
- duration_ = base::TimeDelta::FromMilliseconds(params.duration_ms);
+void MediaSourcePlayer::OnDemuxerConfigsAvailable(
+ const DemuxerConfigs& configs) {
+ DVLOG(1) << __FUNCTION__;
+ duration_ = base::TimeDelta::FromMilliseconds(configs.duration_ms);
clock_.SetDuration(duration_);
- audio_codec_ = params.audio_codec;
- num_channels_ = params.audio_channels;
- sampling_rate_ = params.audio_sampling_rate;
- is_audio_encrypted_ = params.is_audio_encrypted;
- audio_extra_data_ = params.audio_extra_data;
+ audio_codec_ = configs.audio_codec;
+ num_channels_ = configs.audio_channels;
+ sampling_rate_ = configs.audio_sampling_rate;
+ is_audio_encrypted_ = configs.is_audio_encrypted;
+ audio_extra_data_ = configs.audio_extra_data;
if (HasAudio()) {
DCHECK_GT(num_channels_, 0);
audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
@@ -506,269 +283,274 @@ void MediaSourcePlayer::DemuxerReady(
audio_timestamp_helper_.reset();
}
- video_codec_ = params.video_codec;
- width_ = params.video_size.width();
- height_ = params.video_size.height();
- is_video_encrypted_ = params.is_video_encrypted;
+ video_codec_ = configs.video_codec;
+ width_ = configs.video_size.width();
+ height_ = configs.video_size.height();
+ is_video_encrypted_ = configs.is_video_encrypted;
OnMediaMetadataChanged(duration_, width_, height_, true);
- if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
if (reconfig_audio_decoder_)
ConfigureAudioDecoderJob();
// If there is a pending surface change, we can merge it with the config
// change.
if (reconfig_video_decoder_) {
- pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
+ if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
+ ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
ConfigureVideoDecoderJob();
}
- pending_event_ &= ~CONFIG_CHANGE_EVENT_PENDING;
+
+ ClearPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
+
+ // Resume decoding after the config change if we are still playing.
if (playing_)
StartInternal();
}
}
-void MediaSourcePlayer::ReadFromDemuxerAck(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) {
- DCHECK_LT(0u, params.access_units.size());
- if (params.type == DemuxerStream::AUDIO)
- waiting_for_audio_data_ = false;
- else
- waiting_for_video_data_ = false;
-
- // If there is a pending seek request, ignore the data from the chunk demuxer.
- // The data will be requested later when OnSeekRequestAck() is called.
- if (pending_event_ & SEEK_EVENT_PENDING)
- return;
-
- if (params.type == DemuxerStream::AUDIO) {
- DCHECK_EQ(0u, audio_access_unit_index_);
- received_audio_ = params;
- } else {
- DCHECK_EQ(0u, video_access_unit_index_);
- received_video_ = params;
- }
-
- if (pending_event_ != NO_EVENT_PENDING || !playing_)
- return;
-
- if (sync_decoder_jobs_) {
- SyncAndStartDecoderJobs();
- return;
- }
-
- if (params.type == DemuxerStream::AUDIO)
- DecodeMoreAudio();
- else
- DecodeMoreVideo();
+void MediaSourcePlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
+ DVLOG(1) << __FUNCTION__ << "(" << data.type << ")";
+ DCHECK_LT(0u, data.access_units.size());
+ if (data.type == DemuxerStream::AUDIO && audio_decoder_job_)
+ audio_decoder_job_->OnDataReceived(data);
+ else if (data.type == DemuxerStream::VIDEO && video_decoder_job_)
+ video_decoder_job_->OnDataReceived(data);
}
-void MediaSourcePlayer::DurationChanged(const base::TimeDelta& duration) {
+void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
duration_ = duration;
clock_.SetDuration(duration_);
}
+base::android::ScopedJavaLocalRef<jobject> MediaSourcePlayer::GetMediaCrypto() {
+ base::android::ScopedJavaLocalRef<jobject> media_crypto;
+ if (drm_bridge_)
+ media_crypto = drm_bridge_->GetMediaCrypto();
+ return media_crypto;
+}
+
+void MediaSourcePlayer::OnMediaCryptoReady() {
+ DCHECK(!drm_bridge_->GetMediaCrypto().is_null());
+ drm_bridge_->SetMediaCryptoReadyCB(base::Closure());
+
+ if (playing_)
+ StartInternal();
+}
+
void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
// Currently we don't support DRM change during the middle of playback, even
// if the player is paused.
// TODO(qinmin): support DRM change after playback has started.
// http://crbug.com/253792.
if (GetCurrentTime() > base::TimeDelta()) {
- LOG(INFO) << "Setting DRM bridge after play back has started. "
+ LOG(INFO) << "Setting DRM bridge after playback has started. "
<< "This is not well supported!";
}
drm_bridge_ = drm_bridge;
+ if (drm_bridge_->GetMediaCrypto().is_null()) {
+ drm_bridge_->SetMediaCryptoReadyCB(base::Bind(
+ &MediaSourcePlayer::OnMediaCryptoReady, weak_this_.GetWeakPtr()));
+ return;
+ }
+
if (playing_)
StartInternal();
}
-void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
- DVLOG(1) << "OnSeekRequestAck(" << seek_request_id << ")";
+void MediaSourcePlayer::OnDemuxerSeeked(unsigned seek_request_id) {
+ DVLOG(1) << __FUNCTION__ << "(" << seek_request_id << ")";
// Do nothing until the most recent seek request is processed.
if (seek_request_id_ != seek_request_id)
return;
- pending_event_ &= ~SEEK_EVENT_PENDING;
+
+ ClearPendingEvent(SEEK_EVENT_PENDING);
+
OnSeekComplete();
ProcessPendingEvents();
}
void MediaSourcePlayer::UpdateTimestamps(
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
+ base::TimeDelta new_max_time = presentation_timestamp;
+
if (audio_output_bytes > 0) {
audio_timestamp_helper_->AddFrames(
audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_));
- clock_.SetMaxTime(audio_timestamp_helper_->GetTimestamp());
- } else {
- clock_.SetMaxTime(presentation_timestamp);
+ new_max_time = audio_timestamp_helper_->GetTimestamp();
}
+ clock_.SetMaxTime(new_max_time);
OnTimeUpdated();
}
void MediaSourcePlayer::ProcessPendingEvents() {
+ DVLOG(1) << __FUNCTION__ << " : 0x" << std::hex << pending_event_;
// Wait for all the decoding jobs to finish before processing pending tasks.
- if ((audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
- (video_decoder_job_ && video_decoder_job_->is_decoding())) {
+ if (video_decoder_job_ && video_decoder_job_->is_decoding()) {
+ DVLOG(1) << __FUNCTION__ << " : A video job is still decoding.";
+ return;
+ }
+
+ if (audio_decoder_job_ && audio_decoder_job_->is_decoding()) {
+ DVLOG(1) << __FUNCTION__ << " : An audio job is still decoding.";
+ return;
+ }
+
+ if (IsEventPending(PREFETCH_DONE_EVENT_PENDING)) {
+ DVLOG(1) << __FUNCTION__ << " : PREFETCH_DONE still pending.";
return;
}
- if (pending_event_ & SEEK_EVENT_PENDING) {
+ if (IsEventPending(SEEK_EVENT_PENDING)) {
+ int seek_request_id = ++seek_request_id_;
+ DVLOG(1) << __FUNCTION__ << " : Handling SEEK_EVENT: " << seek_request_id;
ClearDecodingData();
- manager()->OnMediaSeekRequest(
- player_id(), GetCurrentTime(), ++seek_request_id_);
+ demuxer_->RequestDemuxerSeek(
+ demuxer_client_id_, GetCurrentTime(), seek_request_id);
return;
}
start_time_ticks_ = base::TimeTicks();
- if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
+ DVLOG(1) << __FUNCTION__ << " : Handling CONFIG_CHANGE_EVENT.";
DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
- manager()->OnMediaConfigRequest(player_id());
+ demuxer_->RequestDemuxerConfigs(demuxer_client_id_);
return;
}
- if (pending_event_ & SURFACE_CHANGE_EVENT_PENDING) {
+ if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING)) {
+ DVLOG(1) << __FUNCTION__ << " : Handling SURFACE_CHANGE_EVENT.";
video_decoder_job_.reset();
ConfigureVideoDecoderJob();
- pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
+ ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
}
+ if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING)) {
+ DVLOG(1) << __FUNCTION__ << " : Handling PREFETCH_REQUEST_EVENT.";
+ int count = (audio_decoder_job_ ? 1 : 0) + (video_decoder_job_ ? 1 : 0);
+
+ base::Closure barrier = BarrierClosure(count, base::Bind(
+ &MediaSourcePlayer::OnPrefetchDone, weak_this_.GetWeakPtr()));
+
+ if (audio_decoder_job_)
+ audio_decoder_job_->Prefetch(barrier);
+
+ if (video_decoder_job_)
+ video_decoder_job_->Prefetch(barrier);
+
+ SetPendingEvent(PREFETCH_DONE_EVENT_PENDING);
+ ClearPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
+ return;
+ }
+
+ DCHECK_EQ(pending_event_, NO_EVENT_PENDING);
+
+ // Now that all pending events have been handled, resume decoding if we are
+ // still playing.
if (playing_)
StartInternal();
}
void MediaSourcePlayer::MediaDecoderCallback(
- bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ bool is_audio, MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
- if (is_audio && audio_decoder_job_)
- audio_decoder_job_->OnDecodeCompleted();
- if (!is_audio && video_decoder_job_)
- video_decoder_job_->OnDecodeCompleted();
+ DVLOG(1) << __FUNCTION__ << ": " << is_audio << ", " << status;
+ DCHECK(!is_waiting_for_key_);
- if (is_audio)
+ bool is_clock_manager = is_audio || !HasAudio();
+
+ if (is_clock_manager)
decoder_starvation_callback_.Cancel();
- if (decode_status == MediaDecoderJob::DECODE_FAILED) {
+ if (status == MEDIA_CODEC_ERROR) {
Release();
OnMediaError(MEDIA_ERROR_DECODE);
return;
}
- // If the input reaches input EOS, there is no need to request new data.
- if (decode_status != MediaDecoderJob::DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER &&
- decode_status != MediaDecoderJob::DECODE_INPUT_END_OF_STREAM) {
- if (is_audio)
- audio_access_unit_index_++;
- else
- video_access_unit_index_++;
- }
-
if (pending_event_ != NO_EVENT_PENDING) {
ProcessPendingEvents();
return;
}
- if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED &&
- (is_audio || !HasAudio())) {
- UpdateTimestamps(presentation_timestamp, audio_output_bytes);
- }
-
- if (decode_status == MediaDecoderJob::DECODE_OUTPUT_END_OF_STREAM) {
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM) {
PlaybackCompleted(is_audio);
return;
}
+ if (status == MEDIA_CODEC_OK && is_clock_manager)
+ UpdateTimestamps(presentation_timestamp, audio_output_bytes);
+
if (!playing_) {
- if (is_audio || !HasAudio())
+ if (is_clock_manager)
clock_.Pause();
return;
}
- if (sync_decoder_jobs_) {
- SyncAndStartDecoderJobs();
+ if (status == MEDIA_CODEC_NO_KEY) {
+ is_waiting_for_key_ = true;
return;
}
- base::TimeDelta current_timestamp = GetCurrentTime();
+ // If the status is MEDIA_CODEC_STOPPED, stop decoding new data. The player is
+ // in the middle of a seek or stop event and needs to wait for the IPCs to
+ // come.
+ if (status == MEDIA_CODEC_STOPPED)
+ return;
+
+ if (status == MEDIA_CODEC_OK && is_clock_manager)
+ StartStarvationCallback(presentation_timestamp);
+
if (is_audio) {
- if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
- base::TimeDelta timeout =
- audio_timestamp_helper_->GetTimestamp() - current_timestamp;
- StartStarvationCallback(timeout);
- }
- if (!HasAudioData())
- RequestAudioData();
- else
- DecodeMoreAudio();
+ DecodeMoreAudio();
return;
}
- if (!HasAudio() && decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
- DCHECK(current_timestamp <= presentation_timestamp);
- // For video only streams, fps can be estimated from the difference
- // between the previous and current presentation timestamps. The
- // previous presentation timestamp is equal to current_timestamp.
- // TODO(qinmin): determine whether 2 is a good coefficient for estimating
- // video frame timeout.
- StartStarvationCallback(2 * (presentation_timestamp - current_timestamp));
- }
- if (!HasVideoData())
- RequestVideoData();
- else
- DecodeMoreVideo();
+ DecodeMoreVideo();
}
void MediaSourcePlayer::DecodeMoreAudio() {
+ DVLOG(1) << __FUNCTION__;
DCHECK(!audio_decoder_job_->is_decoding());
- DCHECK(HasAudioData());
-
- if (DemuxerStream::kConfigChanged ==
- received_audio_.access_units[audio_access_unit_index_].status) {
- // Wait for demuxer ready message.
- reconfig_audio_decoder_ = true;
- pending_event_ |= CONFIG_CHANGE_EVENT_PENDING;
- received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- audio_access_unit_index_ = 0;
- ProcessPendingEvents();
+
+ if (audio_decoder_job_->Decode(
+ start_time_ticks_, start_presentation_timestamp_, base::Bind(
+ &MediaSourcePlayer::MediaDecoderCallback,
+ weak_this_.GetWeakPtr(), true))) {
return;
}
- audio_decoder_job_->Decode(
- received_audio_.access_units[audio_access_unit_index_],
- start_time_ticks_, start_presentation_timestamp_,
- base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
- weak_this_.GetWeakPtr(), true));
+ // Failed to start the next decode.
+ // Wait for demuxer ready message.
+ reconfig_audio_decoder_ = true;
+ SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
+ ProcessPendingEvents();
}
void MediaSourcePlayer::DecodeMoreVideo() {
- DVLOG(1) << "DecodeMoreVideo()";
+ DVLOG(1) << __FUNCTION__;
DCHECK(!video_decoder_job_->is_decoding());
- DCHECK(HasVideoData());
-
- if (DemuxerStream::kConfigChanged ==
- received_video_.access_units[video_access_unit_index_].status) {
- // Wait for demuxer ready message.
- reconfig_video_decoder_ = true;
- pending_event_ |= CONFIG_CHANGE_EVENT_PENDING;
- received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- video_access_unit_index_ = 0;
- ProcessPendingEvents();
+
+ if (video_decoder_job_->Decode(
+ start_time_ticks_, start_presentation_timestamp_, base::Bind(
+ &MediaSourcePlayer::MediaDecoderCallback,
+ weak_this_.GetWeakPtr(), false))) {
return;
}
- DVLOG(3) << "VideoDecoderJob::Decode(" << video_access_unit_index_ << ", "
- << start_time_ticks_.ToInternalValue() << ", "
- << start_presentation_timestamp_.InMilliseconds() << ")";
- video_decoder_job_->Decode(
- received_video_.access_units[video_access_unit_index_],
- start_time_ticks_, start_presentation_timestamp_,
- base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
- weak_this_.GetWeakPtr(), false));
+ // Failed to start the next decode.
+ // Wait for demuxer ready message.
+ reconfig_video_decoder_ = true;
+ SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
+ ProcessPendingEvents();
}
void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
+ DVLOG(1) << __FUNCTION__ << "(" << is_audio << ")";
if (is_audio)
audio_finished_ = true;
else
@@ -783,18 +565,12 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
}
void MediaSourcePlayer::ClearDecodingData() {
- DVLOG(1) << "ClearDecodingData()";
+ DVLOG(1) << __FUNCTION__;
if (audio_decoder_job_)
audio_decoder_job_->Flush();
if (video_decoder_job_)
video_decoder_job_->Flush();
start_time_ticks_ = base::TimeTicks();
- received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- audio_access_unit_index_ = 0;
- video_access_unit_index_ = 0;
- waiting_for_audio_data_ = false;
- waiting_for_video_data_ = false;
}
bool MediaSourcePlayer::HasVideo() {
@@ -815,26 +591,18 @@ void MediaSourcePlayer::ConfigureAudioDecoderJob() {
if (audio_decoder_job_ && !reconfig_audio_decoder_)
return;
- base::android::ScopedJavaLocalRef<jobject> media_codec;
- if (is_audio_encrypted_) {
- if (drm_bridge_) {
- media_codec = drm_bridge_->GetMediaCrypto();
- // TODO(qinmin): currently we assume MediaCrypto is available whenever
- // MediaDrmBridge is constructed. This will change if we want to support
- // more general uses cases of EME.
- DCHECK(!media_codec.is_null());
- } else {
- // Don't create the decoder job if |drm_bridge_| is not set,
- // so StartInternal() will not proceed.
- LOG(INFO) << "MediaDrmBridge is not available when creating decoder "
- << "for encrypted audio stream.";
- return;
- }
- }
+ base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
+ if (is_audio_encrypted_ && media_crypto.is_null())
+ return;
+
+ DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
audio_decoder_job_.reset(AudioDecoderJob::Create(
audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
- audio_extra_data_.size(), media_codec.obj()));
+ audio_extra_data_.size(), media_crypto.obj(),
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_), demuxer_client_id_,
+ DemuxerStream::AUDIO)));
if (audio_decoder_job_) {
SetVolumeInternal();
@@ -852,25 +620,27 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
if (video_decoder_job_ && !reconfig_video_decoder_)
return;
- base::android::ScopedJavaLocalRef<jobject> media_codec;
- if (is_video_encrypted_) {
- if (drm_bridge_) {
- media_codec = drm_bridge_->GetMediaCrypto();
- DCHECK(!media_codec.is_null());
- } else {
- LOG(INFO) << "MediaDrmBridge is not available when creating decoder "
- << "for encrypted video stream.";
- return;
- }
- }
+ base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
+ if (is_video_encrypted_ && media_crypto.is_null())
+ return;
+
+ DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
// Release the old VideoDecoderJob first so the surface can get released.
// Android does not allow 2 MediaCodec instances use the same surface.
video_decoder_job_.reset();
// Create the new VideoDecoderJob.
- video_decoder_job_.reset(VideoDecoderJob::Create(
- video_codec_, gfx::Size(width_, height_), surface_.j_surface().obj(),
- media_codec.obj()));
+ bool is_secure = IsProtectedSurfaceRequired();
+ video_decoder_job_.reset(
+ VideoDecoderJob::Create(video_codec_,
+ is_secure,
+ gfx::Size(width_, height_),
+ surface_.j_surface().obj(),
+ media_crypto.obj(),
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_),
+ demuxer_client_id_,
+ DemuxerStream::VIDEO)));
if (video_decoder_job_)
reconfig_video_decoder_ = false;
@@ -881,11 +651,36 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
}
void MediaSourcePlayer::OnDecoderStarved() {
- sync_decoder_jobs_ = true;
+ DVLOG(1) << __FUNCTION__;
+ SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
+ ProcessPendingEvents();
}
void MediaSourcePlayer::StartStarvationCallback(
- const base::TimeDelta& timeout) {
+ const base::TimeDelta& presentation_timestamp) {
+ // 20ms was chosen because it is the typical size of a compressed audio frame.
+ // Anything smaller than this would likely cause unnecessary cycling in and
+ // out of the prefetch state.
+ const base::TimeDelta kMinStarvationTimeout =
+ base::TimeDelta::FromMilliseconds(20);
+
+ base::TimeDelta current_timestamp = GetCurrentTime();
+ base::TimeDelta timeout;
+ if (HasAudio()) {
+ timeout = audio_timestamp_helper_->GetTimestamp() - current_timestamp;
+ } else {
+ DCHECK(current_timestamp <= presentation_timestamp);
+
+ // For video only streams, fps can be estimated from the difference
+ // between the previous and current presentation timestamps. The
+ // previous presentation timestamp is equal to current_timestamp.
+ // TODO(qinmin): determine whether 2 is a good coefficient for estimating
+ // video frame timeout.
+ timeout = 2 * (presentation_timestamp - current_timestamp);
+ }
+
+ timeout = std::max(timeout, kMinStarvationTimeout);
+
decoder_starvation_callback_.Reset(
base::Bind(&MediaSourcePlayer::OnDecoderStarved,
weak_this_.GetWeakPtr()));
@@ -893,66 +688,76 @@ void MediaSourcePlayer::StartStarvationCallback(
FROM_HERE, decoder_starvation_callback_.callback(), timeout);
}
-void MediaSourcePlayer::SyncAndStartDecoderJobs() {
- // For streams with both audio and video, send the request for video too.
- // However, don't wait for the response so that we won't have lots of
- // noticeable pauses in the audio. Video will sync with audio by itself.
- if (HasVideo() && !HasVideoData()) {
- RequestVideoData();
- if (!HasAudio())
- return;
- }
- if (HasAudio() && !HasAudioData()) {
- RequestAudioData();
+void MediaSourcePlayer::SetVolumeInternal() {
+ if (audio_decoder_job_ && volume_ >= 0)
+ audio_decoder_job_->SetVolume(volume_);
+}
+
+bool MediaSourcePlayer::IsProtectedSurfaceRequired() {
+ return is_video_encrypted_ &&
+ drm_bridge_ && drm_bridge_->IsProtectedSurfaceRequired();
+}
+
+void MediaSourcePlayer::OnPrefetchDone() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
+ DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
+ DCHECK(IsEventPending(PREFETCH_DONE_EVENT_PENDING));
+
+ ClearPendingEvent(PREFETCH_DONE_EVENT_PENDING);
+
+ if (pending_event_ != NO_EVENT_PENDING) {
+ ProcessPendingEvents();
return;
}
+
start_time_ticks_ = base::TimeTicks::Now();
start_presentation_timestamp_ = GetCurrentTime();
if (!clock_.IsPlaying())
clock_.Play();
- if (HasAudioData() && !audio_decoder_job_->is_decoding())
+
+ if (audio_decoder_job_)
DecodeMoreAudio();
- if (HasVideoData() && !video_decoder_job_->is_decoding())
+ if (video_decoder_job_)
DecodeMoreVideo();
- sync_decoder_jobs_ = false;
}
-void MediaSourcePlayer::RequestAudioData() {
- DVLOG(2) << "RequestAudioData()";
- DCHECK(HasAudio());
+const char* MediaSourcePlayer::GetEventName(PendingEventFlags event) {
+ static const char* kPendingEventNames[] = {
+ "SEEK",
+ "SURFACE_CHANGE",
+ "CONFIG_CHANGE",
+ "PREFETCH_REQUEST",
+ "PREFETCH_DONE",
+ };
- if (waiting_for_audio_data_)
- return;
+ int mask = 1;
+ for (size_t i = 0; i < arraysize(kPendingEventNames); ++i, mask <<= 1) {
+ if (event & mask)
+ return kPendingEventNames[i];
+ }
- manager()->OnReadFromDemuxer(player_id(), DemuxerStream::AUDIO);
- received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- audio_access_unit_index_ = 0;
- waiting_for_audio_data_ = true;
+ return "UNKNOWN";
}
-void MediaSourcePlayer::RequestVideoData() {
- DVLOG(2) << "RequestVideoData()";
- DCHECK(HasVideo());
- if (waiting_for_video_data_)
- return;
-
- manager()->OnReadFromDemuxer(player_id(), DemuxerStream::VIDEO);
- received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
- video_access_unit_index_ = 0;
- waiting_for_video_data_ = true;
+bool MediaSourcePlayer::IsEventPending(PendingEventFlags event) const {
+ return pending_event_ & event;
}
-bool MediaSourcePlayer::HasAudioData() const {
- return audio_access_unit_index_ < received_audio_.access_units.size();
-}
+void MediaSourcePlayer::SetPendingEvent(PendingEventFlags event) {
+ DVLOG(1) << __FUNCTION__ << "(" << GetEventName(event) << ")";
+ DCHECK_NE(event, NO_EVENT_PENDING);
+ DCHECK(!IsEventPending(event));
-bool MediaSourcePlayer::HasVideoData() const {
- return video_access_unit_index_ < received_video_.access_units.size();
+ pending_event_ |= event;
}
-void MediaSourcePlayer::SetVolumeInternal() {
- if (audio_decoder_job_ && volume_ >= 0)
- audio_decoder_job_.get()->SetVolume(volume_);
+void MediaSourcePlayer::ClearPendingEvent(PendingEventFlags event) {
+ DVLOG(1) << __FUNCTION__ << "(" << GetEventName(event) << ")";
+ DCHECK_NE(event, NO_EVENT_PENDING);
+ DCHECK(IsEventPending(event)) << GetEventName(event);
+
+ pending_event_ &= ~event;
}
} // namespace media
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
index 05fd224e04e..1708e39a260 100644
--- a/chromium/media/base/android/media_source_player.h
+++ b/chromium/media/base/android/media_source_player.h
@@ -18,130 +18,43 @@
#include "base/threading/thread.h"
#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
-#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/android/demuxer_android.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_decoder_job.h"
#include "media/base/android/media_player_android.h"
#include "media/base/clock.h"
#include "media/base/media_export.h"
-namespace base {
-class MessageLoopProxy;
-}
-
namespace media {
class AudioDecoderJob;
class AudioTimestampHelper;
class VideoDecoderJob;
-// Class for managing all the decoding tasks. Each decoding task will be posted
-// onto the same thread. The thread will be stopped once Stop() is called.
-class MediaDecoderJob {
- public:
- enum DecodeStatus {
- DECODE_SUCCEEDED,
- DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER,
- DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER,
- DECODE_FORMAT_CHANGED,
- DECODE_INPUT_END_OF_STREAM,
- DECODE_OUTPUT_END_OF_STREAM,
- DECODE_FAILED,
- };
-
- virtual ~MediaDecoderJob();
-
- // Callback when a decoder job finishes its work. Args: whether decode
- // finished successfully, presentation time, audio output bytes.
- typedef base::Callback<void(DecodeStatus, const base::TimeDelta&,
- size_t)> DecoderCallback;
-
- // Called by MediaSourcePlayer to decode some data.
- void Decode(const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback);
-
- // Flush the decoder.
- void Flush();
-
- // Causes this instance to be deleted on the thread it is bound to.
- void Release();
-
- // Called on the UI thread to indicate that one decode cycle has completed.
- void OnDecodeCompleted();
-
- bool is_decoding() const { return is_decoding_; }
-
- protected:
- MediaDecoderJob(const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
- MediaCodecBridge* media_codec_bridge,
- bool is_audio);
-
- // Release the output buffer and render it.
- void ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback, DecodeStatus status);
-
- DecodeStatus QueueInputBuffer(const AccessUnit& unit);
-
- // Helper function to decoder data on |thread_|. |unit| contains all the data
- // to be decoded. |start_time_ticks| and |start_presentation_timestamp|
- // represent the system time and the presentation timestamp when the first
- // frame is rendered. We use these information to estimate when the current
- // frame should be rendered. If |needs_flush| is true, codec needs to be
- // flushed at the beginning of this call.
- void DecodeInternal(const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
- bool needs_flush,
- const MediaDecoderJob::DecoderCallback& callback);
-
- // The UI message loop where callbacks should be dispatched.
- scoped_refptr<base::MessageLoopProxy> ui_loop_;
-
- // The message loop that decoder job runs on.
- scoped_refptr<base::MessageLoopProxy> decoder_loop_;
-
- // The media codec bridge used for decoding.
- scoped_ptr<MediaCodecBridge> media_codec_bridge_;
-
- // Whether the decoder needs to be flushed.
- bool needs_flush_;
-
- // Whether this is an audio decoder.
- bool is_audio_;
-
- // Whether input EOS is encountered.
- bool input_eos_encountered_;
-
- // Weak pointer passed to media decoder jobs for callbacks. It is bounded to
- // the decoder thread.
- base::WeakPtrFactory<MediaDecoderJob> weak_this_;
-
- // Whether the decoder is actively decoding data.
- bool is_decoding_;
-};
-
-struct DecoderJobDeleter {
- inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
-};
-
// This class handles media source extensions on Android. It uses Android
// MediaCodec to decode audio and video streams in two separate threads.
// IPC is being used to send data from the render process to this object.
// TODO(qinmin): use shared memory to send data between processes.
-class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
+class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
+ public DemuxerAndroidClient {
public:
- // Construct a MediaSourcePlayer object with all the needed media player
- // callbacks.
- MediaSourcePlayer(int player_id, MediaPlayerManager* manager);
+ // Constructs a player with the given IDs. |manager| and |demuxer| must
+ // outlive the lifetime of this object.
+ MediaSourcePlayer(int player_id,
+ MediaPlayerManager* manager,
+ int demuxer_client_id,
+ DemuxerAndroid* demuxer);
virtual ~MediaSourcePlayer();
+ static bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs);
+
// MediaPlayerAndroid implementation.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
- virtual void Pause() OVERRIDE;
+ virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
@@ -154,13 +67,14 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
virtual bool CanSeekForward() OVERRIDE;
virtual bool CanSeekBackward() OVERRIDE;
virtual bool IsPlayerReady() OVERRIDE;
- virtual void OnSeekRequestAck(unsigned seek_request_id) OVERRIDE;
- virtual void DemuxerReady(
- const MediaPlayerHostMsg_DemuxerReady_Params& params) OVERRIDE;
- virtual void ReadFromDemuxerAck(
- const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) OVERRIDE;
- virtual void DurationChanged(const base::TimeDelta& duration) OVERRIDE;
virtual void SetDrmBridge(MediaDrmBridge* drm_bridge) OVERRIDE;
+ virtual void OnKeyAdded() OVERRIDE;
+
+ // DemuxerAndroidClient implementation.
+ virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) OVERRIDE;
+ virtual void OnDemuxerDataAvailable(const DemuxerData& params) OVERRIDE;
+ virtual void OnDemuxerSeeked(unsigned seek_request_id) OVERRIDE;
+ virtual void OnDemuxerDurationChanged(base::TimeDelta duration) OVERRIDE;
private:
// Update the current timestamp.
@@ -175,10 +89,16 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// Called when the decoder finishes its task.
void MediaDecoderCallback(
- bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ bool is_audio, MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp,
size_t audio_output_bytes);
+ // Gets MediaCrypto object from |drm_bridge_|.
+ base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+
+ // Callback to notify that MediaCrypto is ready in |drm_bridge_|.
+ void OnMediaCryptoReady();
+
// Handle pending events when all the decoder jobs finished.
void ProcessPendingEvents();
@@ -204,32 +124,45 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
void OnDecoderStarved();
// Starts the |decoder_starvation_callback_| task with the timeout value.
- void StartStarvationCallback(const base::TimeDelta& timeout);
+ // |presentation_timestamp| - The presentation timestamp used for starvation
+ // timeout computations. It represents the timestamp of the last piece of
+ // decoded data.
+ void StartStarvationCallback(const base::TimeDelta& presentation_timestamp);
- // Called to sync decoder jobs. This call requests data from chunk demuxer
- // first. Then it updates |start_time_ticks_| and
- // |start_presentation_timestamp_| so that video can resync with audio.
- void SyncAndStartDecoderJobs();
-
- // Functions that send IPC requests to the renderer process for more
- // audio/video data. Returns true if a request has been sent and the decoder
- // needs to wait, or false otherwise.
- void RequestAudioData();
- void RequestVideoData();
-
- // Check whether audio or video data is available for decoders to consume.
- bool HasAudioData() const;
- bool HasVideoData() const;
+ // Schedules a seek event in |pending_events_| and calls StopDecode() on all
+ // the MediaDecoderJobs.
+ void ScheduleSeekEventAndStopDecoding();
// Helper function to set the volume.
void SetVolumeInternal();
+ // Helper function to determine whether a protected surface is needed for
+ // video playback.
+ bool IsProtectedSurfaceRequired();
+
+ // Called when a MediaDecoderJob finishes prefetching data. Once all
+ // MediaDecoderJobs have prefetched data, then this method updates
+ // |start_time_ticks_| and |start_presentation_timestamp_| so that video can
+ // resync with audio and starts decoding.
+ void OnPrefetchDone();
+
enum PendingEventFlags {
NO_EVENT_PENDING = 0,
SEEK_EVENT_PENDING = 1 << 0,
SURFACE_CHANGE_EVENT_PENDING = 1 << 1,
CONFIG_CHANGE_EVENT_PENDING = 1 << 2,
+ PREFETCH_REQUEST_EVENT_PENDING = 1 << 3,
+ PREFETCH_DONE_EVENT_PENDING = 1 << 4,
};
+
+ static const char* GetEventName(PendingEventFlags event);
+ bool IsEventPending(PendingEventFlags event) const;
+ void SetPendingEvent(PendingEventFlags event);
+ void ClearPendingEvent(PendingEventFlags event);
+
+ int demuxer_client_id_;
+ DemuxerAndroid* demuxer_;
+
// Pending event that the player needs to do.
unsigned pending_event_;
@@ -271,31 +204,18 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// The surface object currently owned by the player.
gfx::ScopedJavaSurface surface_;
- // Decoder jobs
- scoped_ptr<AudioDecoderJob, DecoderJobDeleter> audio_decoder_job_;
- scoped_ptr<VideoDecoderJob, DecoderJobDeleter> video_decoder_job_;
+ // Decoder jobs.
+ scoped_ptr<AudioDecoderJob, MediaDecoderJob::Deleter> audio_decoder_job_;
+ scoped_ptr<VideoDecoderJob, MediaDecoderJob::Deleter> video_decoder_job_;
bool reconfig_audio_decoder_;
bool reconfig_video_decoder_;
- // These variables keep track of the current decoding data.
- // TODO(qinmin): remove these variables when we no longer relies on IPC for
- // data passing.
- size_t audio_access_unit_index_;
- size_t video_access_unit_index_;
- bool waiting_for_audio_data_;
- bool waiting_for_video_data_;
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params received_audio_;
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params received_video_;
-
// A cancelable task that is posted when the audio decoder starts requesting
// new data. This callback runs if no data arrives before the timeout period
// elapses.
base::CancelableClosure decoder_starvation_callback_;
- // Whether the audio and video decoder jobs should resync with each other.
- bool sync_decoder_jobs_;
-
// Object to calculate the current audio timestamp for A/V sync.
scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
@@ -304,6 +224,11 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
MediaDrmBridge* drm_bridge_;
+ // No decryption key available to decrypt the encrypted buffer. In this case,
+ // the player should pause. When a new key is added (OnKeyAdded()), we should
+ // try to start playback again.
+ bool is_waiting_for_key_;
+
friend class MediaSourcePlayerTest;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
index 40d28e43f56..edf7016010c 100644
--- a/chromium/media/base/android/media_source_player_unittest.cc
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -6,23 +6,31 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_source_player.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "ui/gl/android/surface_texture_bridge.h"
+#include "ui/gl/android/surface_texture.h"
namespace media {
static const int kDefaultDurationInMs = 10000;
+static const char kAudioMp4[] = "audio/mp4";
+static const char kVideoMp4[] = "video/mp4";
+static const char kAudioWebM[] = "audio/webm";
+static const char kVideoWebM[] = "video/webm";
+
// Mock of MediaPlayerManager for testing purpose
class MockMediaPlayerManager : public MediaPlayerManager {
public:
- MockMediaPlayerManager() : num_requests_(0), last_seek_request_id_(0) {}
- virtual ~MockMediaPlayerManager() {};
+ explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
+ : message_loop_(message_loop) {}
+ virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
virtual void RequestMediaResources(int player_id) OVERRIDE {}
@@ -36,8 +44,8 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int player_id, base::TimeDelta duration, int width, int height,
bool success) OVERRIDE {}
virtual void OnPlaybackComplete(int player_id) OVERRIDE {
- if (message_loop_.is_running())
- message_loop_.Quit();
+ if (message_loop_->is_running())
+ message_loop_->Quit();
}
virtual void OnMediaInterrupted(int player_id) OVERRIDE {}
virtual void OnBufferingUpdate(int player_id, int percentage) OVERRIDE {}
@@ -49,17 +57,6 @@ class MockMediaPlayerManager : public MediaPlayerManager {
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
virtual void DestroyAllMediaPlayers() OVERRIDE {}
- virtual void OnReadFromDemuxer(int player_id,
- media::DemuxerStream::Type type) OVERRIDE {
- num_requests_++;
- if (message_loop_.is_running())
- message_loop_.Quit();
- }
- virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
- unsigned seek_request_id) OVERRIDE {
- last_seek_request_id_ = seek_request_id;
- }
- virtual void OnMediaConfigRequest(int player_id) OVERRIDE {}
virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
return NULL;
}
@@ -75,23 +72,55 @@ class MockMediaPlayerManager : public MediaPlayerManager {
const std::vector<uint8>& message,
const std::string& destination_url) OVERRIDE {}
+ private:
+ base::MessageLoop* message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
+};
+
+class MockDemuxerAndroid : public DemuxerAndroid {
+ public:
+ explicit MockDemuxerAndroid(base::MessageLoop* message_loop)
+ : message_loop_(message_loop),
+ num_requests_(0),
+ last_seek_request_id_(0) {}
+ virtual ~MockDemuxerAndroid() {}
+
+ virtual void AddDemuxerClient(int demuxer_client_id,
+ DemuxerAndroidClient* client) OVERRIDE {}
+ virtual void RemoveDemuxerClient(int demuxer_client_id) OVERRIDE {}
+ virtual void RequestDemuxerConfigs(int demuxer_client_id) OVERRIDE {}
+ virtual void RequestDemuxerData(int demuxer_client_id,
+ media::DemuxerStream::Type type) OVERRIDE {
+ num_requests_++;
+ if (message_loop_->is_running())
+ message_loop_->Quit();
+ }
+ virtual void RequestDemuxerSeek(int demuxer_client_id,
+ base::TimeDelta time_to_seek,
+ unsigned seek_request_id) OVERRIDE {
+ last_seek_request_id_ = seek_request_id;
+ }
+
int num_requests() const { return num_requests_; }
unsigned last_seek_request_id() const { return last_seek_request_id_; }
- base::MessageLoop* message_loop() { return &message_loop_; }
private:
- // The number of request this object sents for decoding data.
+ base::MessageLoop* message_loop_;
+
+ // The number of request this object has requested for decoding data.
int num_requests_;
unsigned last_seek_request_id_;
- base::MessageLoop message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockDemuxerAndroid);
};
class MediaSourcePlayerTest : public testing::Test {
public:
- MediaSourcePlayerTest() {
- manager_.reset(new MockMediaPlayerManager());
- player_.reset(new MediaSourcePlayer(0, manager_.get()));
- }
+ MediaSourcePlayerTest()
+ : manager_(&message_loop_),
+ demuxer_(&message_loop_),
+ player_(0, &manager_, 0, &demuxer_) {}
virtual ~MediaSourcePlayerTest() {}
protected:
@@ -99,79 +128,105 @@ class MediaSourcePlayerTest : public testing::Test {
MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
if (is_audio) {
return reinterpret_cast<MediaDecoderJob*>(
- player_->audio_decoder_job_.get());
+ player_.audio_decoder_job_.get());
}
return reinterpret_cast<MediaDecoderJob*>(
- player_->video_decoder_job_.get());
+ player_.video_decoder_job_.get());
}
// Starts an audio decoder job.
void StartAudioDecoderJob() {
- MediaPlayerHostMsg_DemuxerReady_Params params;
- params.audio_codec = kCodecVorbis;
- params.audio_channels = 2;
- params.audio_sampling_rate = 44100;
- params.is_audio_encrypted = false;
- params.duration_ms = kDefaultDurationInMs;
+ DemuxerConfigs configs;
+ configs.audio_codec = kCodecVorbis;
+ configs.audio_channels = 2;
+ configs.audio_sampling_rate = 44100;
+ configs.is_audio_encrypted = false;
+ configs.duration_ms = kDefaultDurationInMs;
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
- params.audio_extra_data = std::vector<uint8>(
+ configs.audio_extra_data = std::vector<uint8>(
buffer->data(),
buffer->data() + buffer->data_size());
- Start(params);
+ Start(configs);
}
void StartVideoDecoderJob() {
- MediaPlayerHostMsg_DemuxerReady_Params params;
- params.video_codec = kCodecVP8;
- params.video_size = gfx::Size(320, 240);
- params.is_video_encrypted = false;
- params.duration_ms = kDefaultDurationInMs;
- Start(params);
+ DemuxerConfigs configs;
+ configs.video_codec = kCodecVP8;
+ configs.video_size = gfx::Size(320, 240);
+ configs.is_video_encrypted = false;
+ configs.duration_ms = kDefaultDurationInMs;
+ Start(configs);
}
// Starts decoding the data.
- void Start(const MediaPlayerHostMsg_DemuxerReady_Params& params) {
- player_->DemuxerReady(params);
- player_->Start();
+ void Start(const DemuxerConfigs& configs) {
+ player_.OnDemuxerConfigsAvailable(configs);
+ player_.Start();
}
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params
- CreateReadFromDemuxerAckForAudio() {
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
- ack_params.type = DemuxerStream::AUDIO;
- ack_params.access_units.resize(1);
- ack_params.access_units[0].status = DemuxerStream::kOk;
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-packet-0");
- ack_params.access_units[0].data = std::vector<uint8>(
+ DemuxerData CreateReadFromDemuxerAckForAudio(int packet_id) {
+ DemuxerData data;
+ data.type = DemuxerStream::AUDIO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kOk;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
+ base::StringPrintf("vorbis-packet-%d", packet_id));
+ data.access_units[0].data = std::vector<uint8>(
buffer->data(), buffer->data() + buffer->data_size());
// Vorbis needs 4 extra bytes padding on Android to decode properly. Check
// NuMediaExtractor.cpp in Android source code.
uint8 padding[4] = { 0xff , 0xff , 0xff , 0xff };
- ack_params.access_units[0].data.insert(
- ack_params.access_units[0].data.end(), padding, padding + 4);
- return ack_params;
+ data.access_units[0].data.insert(
+ data.access_units[0].data.end(), padding, padding + 4);
+ return data;
}
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params
- CreateReadFromDemuxerAckForVideo() {
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
- ack_params.type = DemuxerStream::VIDEO;
- ack_params.access_units.resize(1);
- ack_params.access_units[0].status = DemuxerStream::kOk;
+ DemuxerData CreateReadFromDemuxerAckForVideo() {
+ DemuxerData data;
+ data.type = DemuxerStream::VIDEO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kOk;
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("vp8-I-frame-320x240");
- ack_params.access_units[0].data = std::vector<uint8>(
+ data.access_units[0].data = std::vector<uint8>(
buffer->data(), buffer->data() + buffer->data_size());
- return ack_params;
+ return data;
}
+ DemuxerData CreateEOSAck(bool is_audio) {
+ DemuxerData data;
+ data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kOk;
+ data.access_units[0].end_of_stream = true;
+ return data;
+ }
+
base::TimeTicks StartTimeTicks() {
- return player_->start_time_ticks_;
+ return player_.start_time_ticks_;
+ }
+
+ bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
+ const std::string& security_level,
+ const std::string& container,
+ const std::vector<std::string>& codecs) {
+ return MediaSourcePlayer::IsTypeSupported(
+ scheme_uuid, security_level, container, codecs);
+ }
+
+ void CreateAndSetVideoSurface() {
+ surface_texture_ = new gfx::SurfaceTexture(0);
+ surface_ = gfx::ScopedJavaSurface(surface_texture_.get());
+ player_.SetVideoSurface(surface_.Pass());
}
protected:
- scoped_ptr<MockMediaPlayerManager> manager_;
- scoped_ptr<MediaSourcePlayer> player_;
+ base::MessageLoop message_loop_;
+ MockMediaPlayerManager manager_;
+ MockDemuxerAndroid demuxer_;
+ MediaSourcePlayer player_;
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_;
+ gfx::ScopedJavaSurface surface_;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
};
@@ -183,7 +238,7 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
// Test audio decoder job will be created when codec is successfully started.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
@@ -191,18 +246,18 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
return;
// Test audio decoder job will not be created when failed to start the codec.
- MediaPlayerHostMsg_DemuxerReady_Params params;
- params.audio_codec = kCodecVorbis;
- params.audio_channels = 2;
- params.audio_sampling_rate = 44100;
- params.is_audio_encrypted = false;
- params.duration_ms = kDefaultDurationInMs;
+ DemuxerConfigs configs;
+ configs.audio_codec = kCodecVorbis;
+ configs.audio_channels = 2;
+ configs.audio_sampling_rate = 44100;
+ configs.is_audio_encrypted = false;
+ configs.duration_ms = kDefaultDurationInMs;
uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
- params.audio_extra_data.insert(params.audio_extra_data.begin(),
+ configs.audio_extra_data.insert(configs.audio_extra_data.begin(),
invalid_codec_data, invalid_codec_data + 4);
- Start(params);
+ Start(configs);
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
@@ -210,20 +265,17 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
return;
// Test video decoder job will be created when surface is valid.
- scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
- new gfx::SurfaceTextureBridge(0));
- gfx::ScopedJavaSurface surface(surface_texture.get());
StartVideoDecoderJob();
// Video decoder job will not be created until surface is available.
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ CreateAndSetVideoSurface();
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
// The decoder job should be ready now.
EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
@@ -231,21 +283,21 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
return;
// Test video decoder job will be created when surface is valid.
- scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
- new gfx::SurfaceTextureBridge(0));
+ scoped_refptr<gfx::SurfaceTexture> surface_texture(
+ new gfx::SurfaceTexture(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
StartVideoDecoderJob();
// Video decoder job will not be created until surface is available.
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Release the surface texture.
surface_texture = NULL;
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
@@ -255,15 +307,31 @@ TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
// Test decoder job will resend a ReadFromDemuxer request after seek.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
// Initiate a seek
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_.SeekTo(base::TimeDelta());
+
+ // Verify that the seek does not occur until the initial prefetch
+ // completes.
+ EXPECT_EQ(0u, demuxer_.last_seek_request_id());
+
+ // Simulate aborted read caused by the seek. This aborts the initial
+ // prefetch.
+ DemuxerData data;
+ data.type = DemuxerStream::AUDIO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kAborted;
+ player_.OnDemuxerDataAvailable(data);
+
+ // Verify that the seek is requested now that the initial prefetch
+ // has completed.
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+
// Sending back the seek ACK, this should trigger the player to call
// OnReadFromDemuxer() again.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
- EXPECT_EQ(2, manager_->num_requests());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(2, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
@@ -272,23 +340,52 @@ TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
// Test SetVideoSurface() will not cause an extra seek while the player is
// waiting for a seek ACK.
- scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
- new gfx::SurfaceTextureBridge(0));
- gfx::ScopedJavaSurface surface(surface_texture.get());
StartVideoDecoderJob();
// Player is still waiting for SetVideoSurface(), so no request is sent.
- EXPECT_EQ(0, manager_->num_requests());
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ EXPECT_EQ(0, demuxer_.num_requests());
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_->SetVideoSurface(surface.Pass());
+ CreateAndSetVideoSurface();
EXPECT_TRUE(NULL == GetMediaDecoderJob(false));
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
// Send the seek ack, player should start requesting data afterwards.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ChangeMultipleSurfaceWhileDecoding) {
+ if (!MediaCodecBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ // Test MediaSourcePlayer can switch multiple surfaces during decoding.
+ CreateAndSetVideoSurface();
+ StartVideoDecoderJob();
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ EXPECT_EQ(0, demuxer_.num_requests());
+
+ // Send the first input chunk.
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_.num_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // While the decoder is decoding, change multiple surfaces. Pass an empty
+ // surface first.
+ gfx::ScopedJavaSurface empty_surface;
+ player_.SetVideoSurface(empty_surface.Pass());
+ // Pass a new non-empty surface.
+ CreateAndSetVideoSurface();
+
+ // Wait for the decoder job to finish decoding.
+ while(GetMediaDecoderJob(false)->is_decoding())
+ message_loop_.RunUntilIdle();
+ // A seek should be initiated to request Iframe.
+ EXPECT_EQ(2u, demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
@@ -296,28 +393,28 @@ TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
return;
// Test decoder job will not start until all pending seek event is handled.
- MediaPlayerHostMsg_DemuxerReady_Params params;
- params.audio_codec = kCodecVorbis;
- params.audio_channels = 2;
- params.audio_sampling_rate = 44100;
- params.is_audio_encrypted = false;
- params.duration_ms = kDefaultDurationInMs;
- player_->DemuxerReady(params);
+ DemuxerConfigs configs;
+ configs.audio_codec = kCodecVorbis;
+ configs.audio_channels = 2;
+ configs.audio_sampling_rate = 44100;
+ configs.is_audio_encrypted = false;
+ configs.duration_ms = kDefaultDurationInMs;
+ player_.OnDemuxerConfigsAvailable(configs);
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Initiate a seek
- player_->SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_->Start();
+ player_.Start();
EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, manager_->num_requests());
+ EXPECT_EQ(0, demuxer_.num_requests());
// Sending back the seek ACK.
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
}
TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
@@ -330,27 +427,27 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
EXPECT_TRUE(NULL != decoder_job);
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
// Sending data to player.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
// Decoder job will not immediately stop after Pause() since it is
// running on another thread.
- player_->Pause();
+ player_.Pause(true);
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
// Nothing happens when calling Start() again.
- player_->Start();
+ player_.Start();
// Verify that Start() will not destroy and recreate the decoder job.
EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_EQ(1, demuxer_.num_requests());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- manager_->message_loop()->Run();
+ message_loop_.Run();
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
}
@@ -360,71 +457,86 @@ TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
// Test that when Start() is called, video decoder jobs will wait for audio
// decoder job before start decoding the data.
- MediaPlayerHostMsg_DemuxerReady_Params params;
- params.audio_codec = kCodecVorbis;
- params.audio_channels = 2;
- params.audio_sampling_rate = 44100;
- params.is_audio_encrypted = false;
+ DemuxerConfigs configs;
+ configs.audio_codec = kCodecVorbis;
+ configs.audio_channels = 2;
+ configs.audio_sampling_rate = 44100;
+ configs.is_audio_encrypted = false;
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
- params.audio_extra_data = std::vector<uint8>(
+ configs.audio_extra_data = std::vector<uint8>(
buffer->data(),
buffer->data() + buffer->data_size());
- params.video_codec = kCodecVP8;
- params.video_size = gfx::Size(320, 240);
- params.is_video_encrypted = false;
- params.duration_ms = kDefaultDurationInMs;
- Start(params);
- EXPECT_EQ(0, manager_->num_requests());
-
- scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
- new gfx::SurfaceTextureBridge(0));
- gfx::ScopedJavaSurface surface(surface_texture.get());
- player_->SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, manager_->last_seek_request_id());
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ configs.video_codec = kCodecVP8;
+ configs.video_size = gfx::Size(320, 240);
+ configs.is_video_encrypted = false;
+ configs.duration_ms = kDefaultDurationInMs;
+ Start(configs);
+ EXPECT_EQ(0, demuxer_.num_requests());
+
+ CreateAndSetVideoSurface();
+ EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
MediaDecoderJob* audio_decoder_job = GetMediaDecoderJob(true);
MediaDecoderJob* video_decoder_job = GetMediaDecoderJob(false);
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
EXPECT_FALSE(audio_decoder_job->is_decoding());
EXPECT_FALSE(video_decoder_job->is_decoding());
// Sending audio data to player, audio decoder should not start.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
EXPECT_FALSE(video_decoder_job->is_decoding());
// Sending video data to player, both decoders should start now.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(audio_decoder_job->is_decoding());
EXPECT_TRUE(video_decoder_job->is_decoding());
}
-// Disabled due to http://crbug.com/266041.
-// TODO(xhwang/qinmin): Fix this test and reenable it.
-TEST_F(MediaSourcePlayerTest,
- DISABLED_StartTimeTicksResetAfterDecoderUnderruns) {
+TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
if (!MediaCodecBridge::IsAvailable())
return;
// Test start time ticks will reset after decoder job underruns.
StartAudioDecoderJob();
EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, manager_->num_requests());
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_EQ(1, demuxer_.num_requests());
+ // For the first couple chunks, the decoder job may return
+ // DECODE_FORMAT_CHANGED status instead of DECODE_SUCCEEDED status. Decode
+ // more frames to guarantee that DECODE_SUCCEEDED will be returned.
+ for (int i = 0; i < 4; ++i) {
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
- manager_->message_loop()->Run();
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(2, manager_->num_requests());
- EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_EQ(5, demuxer_.num_requests());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
base::TimeTicks previous = StartTimeTicks();
// Let the decoder timeout and execute the OnDecoderStarved() callback.
base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
- manager_->message_loop()->RunUntilIdle();
- // Send new data to the decoder. This should reset the start time ticks.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
+ message_loop_.RunUntilIdle();
+
+ // Send new data to the decoder so it can finish the currently
+ // pending decode.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
+ while(GetMediaDecoderJob(true)->is_decoding())
+ message_loop_.RunUntilIdle();
+
+ // Verify the start time ticks is cleared at this point because the
+ // player is prefetching.
+ EXPECT_TRUE(StartTimeTicks() == base::TimeTicks());
+
+ // Send new data to the decoder so it can finish prefetching. This should
+ // reset the start time ticks.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
+ EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
+
base::TimeTicks current = StartTimeTicks();
EXPECT_LE(100.0, (current - previous).InMillisecondsF());
}
@@ -435,28 +547,168 @@ TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterInputEOS) {
// Test MediaSourcePlayer will not request for new data after input EOS is
// reached.
- scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
- new gfx::SurfaceTextureBridge(0));
- gfx::ScopedJavaSurface surface(surface_texture.get());
- player_->SetVideoSurface(surface.Pass());
+ CreateAndSetVideoSurface();
StartVideoDecoderJob();
- player_->OnSeekRequestAck(manager_->last_seek_request_id());
- EXPECT_EQ(1, manager_->num_requests());
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_.num_requests());
// Send the first input chunk.
- player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
- manager_->message_loop()->Run();
- EXPECT_EQ(2, manager_->num_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ message_loop_.Run();
+ EXPECT_EQ(2, demuxer_.num_requests());
// Send EOS.
- MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
- ack_params.type = DemuxerStream::VIDEO;
- ack_params.access_units.resize(1);
- ack_params.access_units[0].status = DemuxerStream::kOk;
- ack_params.access_units[0].end_of_stream = true;
- player_->ReadFromDemuxerAck(ack_params);
- manager_->message_loop()->Run();
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false));
+ message_loop_.Run();
// No more request for data should be made.
- EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_EQ(2, demuxer_.num_requests());
}
+TEST_F(MediaSourcePlayerTest, ReplayAfterInputEOS) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test MediaSourcePlayer can replay after input EOS is
+ // reached.
+ CreateAndSetVideoSurface();
+ StartVideoDecoderJob();
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_.num_requests());
+ // Send the first input chunk.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ message_loop_.Run();
+ EXPECT_EQ(2, demuxer_.num_requests());
+
+ // Send EOS.
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false));
+ message_loop_.Run();
+ // No more request for data should be made.
+ EXPECT_EQ(2, demuxer_.num_requests());
+
+ player_.SeekTo(base::TimeDelta());
+ StartVideoDecoderJob();
+ player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
+ // Seek/Play after EOS should request more data.
+ EXPECT_EQ(3, demuxer_.num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterAbort) {
+ if (!MediaCodecBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ // Test that the decoder will request new data after receiving an aborted
+ // access unit.
+ StartAudioDecoderJob();
+ EXPECT_EQ(1, demuxer_.num_requests());
+
+ // Send an aborted access unit.
+ DemuxerData data;
+ data.type = DemuxerStream::AUDIO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kAborted;
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ // Wait for the decoder job to finish decoding.
+ while(GetMediaDecoderJob(true)->is_decoding())
+ message_loop_.RunUntilIdle();
+
+ // No request will be sent for new data.
+ EXPECT_EQ(1, demuxer_.num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, DemuxerDataArrivesAfterRelease) {
+ if (!MediaCodecBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ // Test that the decoder should not crash if demuxer data arrives after
+ // Release().
+ StartAudioDecoderJob();
+ EXPECT_TRUE(player_.IsPlaying());
+ EXPECT_EQ(1, demuxer_.num_requests());
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+
+ player_.Release();
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+
+ // The decoder job should have been released.
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_EQ(1, demuxer_.num_requests());
+}
+
+// TODO(xhwang): Enable this test when the test devices are updated.
+TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
+ if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ uint8 kWidevineUUID[] = { 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
+ 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
+
+ std::vector<uint8> widevine_uuid(kWidevineUUID,
+ kWidevineUUID + arraysize(kWidevineUUID));
+
+ // We test "L3" fully. But for "L1" we don't check the result as it depend on
+ // whether the test device supports "L1" decoding.
+
+ std::vector<std::string> codec_avc(1, "avc1");
+ std::vector<std::string> codec_aac(1, "mp4a");
+ std::vector<std::string> codec_avc_aac(1, "avc1");
+ codec_avc_aac.push_back("mp4a");
+
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc));
+ IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc);
+
+ // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
+ // Clean this up after we have a solution to specifying decoding mode.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioMp4, codec_aac));
+ IsTypeSupported(widevine_uuid, "L1", kAudioMp4, codec_aac);
+
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc_aac));
+ IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc_aac);
+
+ std::vector<std::string> codec_vp8(1, "vp8");
+ std::vector<std::string> codec_vorbis(1, "vorbis");
+ std::vector<std::string> codec_vp8_vorbis(1, "vp8");
+ codec_vp8_vorbis.push_back("vorbis");
+
+ // TODO(xhwang): WebM is actually not supported but currently
+ // MediaDrmBridge.isCryptoSchemeSupported() doesn't check the container type.
+ // Fix isCryptoSchemeSupported() and update this test as necessary.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8));
+ IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8);
+
+ // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
+ // Clean this up after we have a solution to specifying decoding mode.
+ EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioWebM, codec_vorbis));
+ IsTypeSupported(widevine_uuid, "L1", kAudioWebM, codec_vorbis);
+
+ EXPECT_TRUE(
+ IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8_vorbis));
+ IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8_vorbis);
+}
+
+TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
+ if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
+ LOG(INFO) << "Could not run test - not supported on device.";
+ return;
+ }
+
+ uint8 kInvalidUUID[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
+
+ std::vector<uint8> invalid_uuid(kInvalidUUID,
+ kInvalidUUID + arraysize(kInvalidUUID));
+
+ std::vector<std::string> codec_avc(1, "avc1");
+ EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L3", kVideoMp4, codec_avc));
+ EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L1", kVideoMp4, codec_avc));
+}
+
+// TODO(xhwang): Are these IsTypeSupported tests device specific?
+// TODO(xhwang): Add more IsTypeSupported tests.
+
} // namespace media
diff --git a/chromium/media/base/android/video_decoder_job.cc b/chromium/media/base/android/video_decoder_job.cc
new file mode 100644
index 00000000000..af89593362e
--- /dev/null
+++ b/chromium/media/base/android/video_decoder_job.cc
@@ -0,0 +1,68 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/video_decoder_job.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/threading/thread.h"
+#include "media/base/android/media_codec_bridge.h"
+
+namespace media {
+
+class VideoDecoderThread : public base::Thread {
+ public:
+ VideoDecoderThread() : base::Thread("MediaSource_VideoDecoderThread") {
+ Start();
+ }
+};
+
+// TODO(qinmin): Check if it is tolerable to use worker pool to handle all the
+// decoding tasks so that we don't need a global thread here.
+// http://crbug.com/245750
+base::LazyInstance<VideoDecoderThread>::Leaky
+ g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
+
+VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto,
+ const base::Closure& request_data_cb) {
+ scoped_ptr<VideoCodecBridge> codec(
+ VideoCodecBridge::Create(video_codec, is_secure));
+ if (codec && codec->Start(video_codec, size, surface, media_crypto))
+ return new VideoDecoderJob(codec.Pass(), request_data_cb);
+
+ LOG(ERROR) << "Failed to create VideoDecoderJob.";
+ return NULL;
+}
+
+VideoDecoderJob::VideoDecoderJob(
+ scoped_ptr<VideoCodecBridge> video_codec_bridge,
+ const base::Closure& request_data_cb)
+ : MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
+ video_codec_bridge.get(), request_data_cb),
+ video_codec_bridge_(video_codec_bridge.Pass()) {
+}
+
+VideoDecoderJob::~VideoDecoderJob() {
+}
+
+void VideoDecoderJob::ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback,
+ MediaCodecStatus status) {
+ if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
+ video_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, true);
+
+ callback.Run(status, presentation_timestamp, 0);
+}
+
+bool VideoDecoderJob::ComputeTimeToRender() const {
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/video_decoder_job.h b/chromium/media/base/android/video_decoder_job.h
new file mode 100644
index 00000000000..27a3957c685
--- /dev/null
+++ b/chromium/media/base/android/video_decoder_job.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_VIDEO_DECODER_JOB_H_
+#define MEDIA_BASE_ANDROID_VIDEO_DECODER_JOB_H_
+
+#include <jni.h>
+
+#include "media/base/android/media_decoder_job.h"
+
+namespace media {
+
+class VideoCodecBridge;
+
+// Class for managing video decoding jobs.
+class VideoDecoderJob : public MediaDecoderJob {
+ public:
+ virtual ~VideoDecoderJob();
+
+ // Create a new VideoDecoderJob instance.
+ // |video_codec| - The video format the object needs to decode.
+ // |is_secure| - Whether secure decoding is required.
+ // |size| - The natural size of the output frames.
+ // |surface| - The surface to render the frames to.
+ // |media_crypto| - Handle to a Java object responsible for decrypting the
+ // video data.
+ // |request_data_cb| - Callback used to request more data for the decoder.
+ static VideoDecoderJob* Create(const VideoCodec video_codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto,
+ const base::Closure& request_data_cb);
+
+ private:
+ VideoDecoderJob(scoped_ptr<VideoCodecBridge> video_codec_bridge,
+ const base::Closure& request_data_cb);
+
+ // MediaDecoderJob implementation.
+ virtual void ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback,
+ MediaCodecStatus status) OVERRIDE;
+
+ virtual bool ComputeTimeToRender() const OVERRIDE;
+
+ scoped_ptr<VideoCodecBridge> video_codec_bridge_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_VIDEO_DECODER_JOB_H_
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
index b2cdd8c41a7..0bf37209b2b 100644
--- a/chromium/media/base/audio_buffer.cc
+++ b/chromium/media/base/audio_buffer.cc
@@ -11,11 +11,6 @@
namespace media {
-// Alignment of each channel's data; this must match what ffmpeg expects
-// (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
-// order to work on all processors.
-enum { kChannelAlignment = 32 };
-
AudioBuffer::AudioBuffer(SampleFormat sample_format,
int channel_count,
int frame_count,
@@ -73,6 +68,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
data_size *= channel_count;
data_.reset(
static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
+ channel_data_.reserve(1);
+ channel_data_.push_back(data_.get());
if (data)
memcpy(data_.get(), data[0], data_size);
}
diff --git a/chromium/media/base/audio_buffer.h b/chromium/media/base/audio_buffer.h
index e52355ac4c2..c3bcf4dab9c 100644
--- a/chromium/media/base/audio_buffer.h
+++ b/chromium/media/base/audio_buffer.h
@@ -23,6 +23,11 @@ class AudioBus;
class MEDIA_EXPORT AudioBuffer
: public base::RefCountedThreadSafe<AudioBuffer> {
public:
+ // Alignment of each channel's data; this must match what ffmpeg expects
+ // (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
+ // order to work on all processors.
+ enum { kChannelAlignment = 32 };
+
// Create an AudioBuffer whose channel data is copied from |data|. For
// interleaved data, only the first buffer is used. For planar data, the
// number of buffers must be equal to |channel_count|. |frame_count| is the
@@ -95,8 +100,8 @@ class MEDIA_EXPORT AudioBuffer
bool end_of_stream() const { return end_of_stream_; }
// Access to the raw buffer for ffmpeg to write directly to. Data for planar
- // data is grouped by channel.
- uint8* writable_data() { return data_.get(); }
+ // data is grouped by channel. There is only 1 entry for interleaved formats.
+ const std::vector<uint8*>& channel_data() const { return channel_data_; }
private:
friend class base::RefCountedThreadSafe<AudioBuffer>;
diff --git a/chromium/media/base/audio_capturer_source.h b/chromium/media/base/audio_capturer_source.h
index deae5e22dc1..b584f8a48db 100644
--- a/chromium/media/base/audio_capturer_source.h
+++ b/chromium/media/base/audio_capturer_source.h
@@ -26,7 +26,8 @@ class AudioCapturerSource
// Callback to deliver the captured data from the OS.
virtual void Capture(AudioBus* audio_source,
int audio_delay_milliseconds,
- double volume) = 0;
+ double volume,
+ bool key_pressed) = 0;
// Signals an error has occurred.
virtual void OnCaptureError() = 0;
diff --git a/chromium/media/base/audio_decoder_config.cc b/chromium/media/base/audio_decoder_config.cc
index 38db05d3a54..dfaf94a2682 100644
--- a/chromium/media/base/audio_decoder_config.cc
+++ b/chromium/media/base/audio_decoder_config.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/time/time.h"
#include "media/audio/sample_rates.h"
#include "media/base/limits.h"
#include "media/base/sample_format.h"
@@ -30,7 +31,8 @@ AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
size_t extra_data_size,
bool is_encrypted) {
Initialize(codec, sample_format, channel_layout, samples_per_second,
- extra_data, extra_data_size, is_encrypted, true);
+ extra_data, extra_data_size, is_encrypted, true,
+ base::TimeDelta(), base::TimeDelta());
}
void AudioDecoderConfig::Initialize(AudioCodec codec,
@@ -40,7 +42,9 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
const uint8* extra_data,
size_t extra_data_size,
bool is_encrypted,
- bool record_stats) {
+ bool record_stats,
+ base::TimeDelta seek_preroll,
+ base::TimeDelta codec_delay) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
if (record_stats) {
@@ -66,6 +70,8 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format);
extra_data_.assign(extra_data, extra_data + extra_data_size);
is_encrypted_ = is_encrypted;
+ seek_preroll_ = seek_preroll;
+ codec_delay_ = codec_delay;
int channels = ChannelLayoutToChannelCount(channel_layout_);
bytes_per_frame_ = channels * bytes_per_channel_;
@@ -80,7 +86,9 @@ bool AudioDecoderConfig::IsValidConfig() const {
bytes_per_channel_ <= limits::kMaxBytesPerSample &&
samples_per_second_ > 0 &&
samples_per_second_ <= limits::kMaxSampleRate &&
- sample_format_ != kUnknownSampleFormat;
+ sample_format_ != kUnknownSampleFormat &&
+ seek_preroll_ >= base::TimeDelta() &&
+ codec_delay_ >= base::TimeDelta();
}
bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
@@ -92,7 +100,9 @@ bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
(!extra_data() || !memcmp(extra_data(), config.extra_data(),
extra_data_size())) &&
(is_encrypted() == config.is_encrypted()) &&
- (sample_format() == config.sample_format()));
+ (sample_format() == config.sample_format()) &&
+ (seek_preroll() == config.seek_preroll()) &&
+ (codec_delay() == config.codec_delay()));
}
} // namespace media
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
index 1c61e70c3ad..a17d2215b97 100644
--- a/chromium/media/base/audio_decoder_config.h
+++ b/chromium/media/base/audio_decoder_config.h
@@ -8,6 +8,7 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/time/time.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
#include "media/base/sample_format.h"
@@ -63,7 +64,9 @@ class MEDIA_EXPORT AudioDecoderConfig {
void Initialize(AudioCodec codec, SampleFormat sample_format,
ChannelLayout channel_layout, int samples_per_second,
const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted, bool record_stats);
+ bool is_encrypted, bool record_stats,
+ base::TimeDelta seek_preroll,
+ base::TimeDelta codec_delay);
// Returns true if this object has appropriate configuration values, false
// otherwise.
@@ -80,6 +83,8 @@ class MEDIA_EXPORT AudioDecoderConfig {
int samples_per_second() const { return samples_per_second_; }
SampleFormat sample_format() const { return sample_format_; }
int bytes_per_frame() const { return bytes_per_frame_; }
+ base::TimeDelta seek_preroll() const { return seek_preroll_; }
+ base::TimeDelta codec_delay() const { return codec_delay_; }
// Optional byte data required to initialize audio decoders such as Vorbis
// codebooks.
@@ -103,6 +108,15 @@ class MEDIA_EXPORT AudioDecoderConfig {
std::vector<uint8> extra_data_;
bool is_encrypted_;
+ // |seek_preroll_| is the duration of the data that the decoder must decode
+ // before the decoded data is valid.
+ base::TimeDelta seek_preroll_;
+
+ // |codec_delay_| is the overall delay overhead added by the codec while
+ // encoding. This value should be subtracted from each block's timestamp to
+ // get the actual timestamp.
+ base::TimeDelta codec_delay_;
+
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
// typically small, the performance impact is minimal.
diff --git a/chromium/media/base/audio_hash.cc b/chromium/media/base/audio_hash.cc
index 0ed6fe51e6f..28f16418b66 100644
--- a/chromium/media/base/audio_hash.cc
+++ b/chromium/media/base/audio_hash.cc
@@ -50,4 +50,4 @@ std::string AudioHash::ToString() const {
return result;
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/base/audio_hash.h b/chromium/media/base/audio_hash.h
index 3dc0e9edb3b..91d6edf9043 100644
--- a/chromium/media/base/audio_hash.h
+++ b/chromium/media/base/audio_hash.h
@@ -56,4 +56,4 @@ class MEDIA_EXPORT AudioHash {
} // namespace media
-#endif // MEDIA_BASE_AUDIO_HASH_H_ \ No newline at end of file
+#endif // MEDIA_BASE_AUDIO_HASH_H_
diff --git a/chromium/media/base/audio_hash_unittest.cc b/chromium/media/base/audio_hash_unittest.cc
index ee1b1de420e..bdc5a2cc56d 100644
--- a/chromium/media/base/audio_hash_unittest.cc
+++ b/chromium/media/base/audio_hash_unittest.cc
@@ -164,4 +164,4 @@ TEST_F(AudioHashTest, VerifySimilarHash) {
EXPECT_NE(hash_one.ToString(), hash_three.ToString());
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/base/bit_reader.cc b/chromium/media/base/bit_reader.cc
index 9f6f4098a1f..ea74350390a 100644
--- a/chromium/media/base/bit_reader.cc
+++ b/chromium/media/base/bit_reader.cc
@@ -4,6 +4,8 @@
#include "media/base/bit_reader.h"
+#include <algorithm>
+
namespace media {
BitReader::BitReader(const uint8* data, off_t size)
diff --git a/chromium/media/base/channel_layout.cc b/chromium/media/base/channel_layout.cc
index e895ddc8541..958430ac4e7 100644
--- a/chromium/media/base/channel_layout.cc
+++ b/chromium/media/base/channel_layout.cc
@@ -184,4 +184,73 @@ int ChannelOrder(ChannelLayout layout, Channels channel) {
return kChannelOrderings[layout][channel];
}
+const char* ChannelLayoutToString(ChannelLayout layout) {
+ switch (layout) {
+ case CHANNEL_LAYOUT_NONE:
+ return "NONE";
+ case CHANNEL_LAYOUT_UNSUPPORTED:
+ return "UNSUPPORTED";
+ case CHANNEL_LAYOUT_MONO:
+ return "MONO";
+ case CHANNEL_LAYOUT_STEREO:
+ return "STEREO";
+ case CHANNEL_LAYOUT_2_1:
+ return "2.1";
+ case CHANNEL_LAYOUT_SURROUND:
+ return "SURROUND";
+ case CHANNEL_LAYOUT_4_0:
+ return "4.0";
+ case CHANNEL_LAYOUT_2_2:
+ return "2.2";
+ case CHANNEL_LAYOUT_QUAD:
+ return "QUAD";
+ case CHANNEL_LAYOUT_5_0:
+ return "5.0";
+ case CHANNEL_LAYOUT_5_1:
+ return "5.1";
+ case CHANNEL_LAYOUT_5_0_BACK:
+ return "5.0_BACK";
+ case CHANNEL_LAYOUT_5_1_BACK:
+ return "5.1_BACK";
+ case CHANNEL_LAYOUT_7_0:
+ return "7.0";
+ case CHANNEL_LAYOUT_7_1:
+ return "7.1";
+ case CHANNEL_LAYOUT_7_1_WIDE:
+ return "7.1_WIDE";
+ case CHANNEL_LAYOUT_STEREO_DOWNMIX:
+ return "STEREO_DOWNMIX";
+ case CHANNEL_LAYOUT_2POINT1:
+ return "2POINT1";
+ case CHANNEL_LAYOUT_3_1:
+ return "3.1";
+ case CHANNEL_LAYOUT_4_1:
+ return "4.1";
+ case CHANNEL_LAYOUT_6_0:
+ return "6.0";
+ case CHANNEL_LAYOUT_6_0_FRONT:
+ return "6.0_FRONT";
+ case CHANNEL_LAYOUT_HEXAGONAL:
+ return "HEXAGONAL";
+ case CHANNEL_LAYOUT_6_1:
+ return "6.1";
+ case CHANNEL_LAYOUT_6_1_BACK:
+ return "6.1_BACK";
+ case CHANNEL_LAYOUT_6_1_FRONT:
+ return "6.1_FRONT";
+ case CHANNEL_LAYOUT_7_0_FRONT:
+ return "7.0_FRONT";
+ case CHANNEL_LAYOUT_7_1_WIDE_BACK:
+ return "7.1_WIDE_BACK";
+ case CHANNEL_LAYOUT_OCTAGONAL:
+ return "OCTAGONAL";
+ case CHANNEL_LAYOUT_DISCRETE:
+ return "DISCRETE";
+ case CHANNEL_LAYOUT_MAX:
+ break;
+ }
+ NOTREACHED() << "Invalid channel layout provided: " << layout;
+ return "";
+}
+
} // namespace media
diff --git a/chromium/media/base/channel_layout.h b/chromium/media/base/channel_layout.h
index 4c96ca517f4..9354eee850d 100644
--- a/chromium/media/base/channel_layout.h
+++ b/chromium/media/base/channel_layout.h
@@ -130,6 +130,9 @@ MEDIA_EXPORT int ChannelLayoutToChannelCount(ChannelLayout layout);
// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
MEDIA_EXPORT ChannelLayout GuessChannelLayout(int channels);
+// Returns a string representation of the channel layout.
+MEDIA_EXPORT const char* ChannelLayoutToString(ChannelLayout layout);
+
} // namespace media
#endif // MEDIA_BASE_CHANNEL_LAYOUT_H_
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
index 9eaa128ceb2..d4e75410abe 100644
--- a/chromium/media/base/decoder_buffer.cc
+++ b/chromium/media/base/decoder_buffer.cc
@@ -80,7 +80,8 @@ std::string DecoderBuffer::AsHumanReadableString() {
<< " duration: " << duration_.InMicroseconds()
<< " size: " << size_
<< " side_data_size: " << side_data_size_
- << " encrypted: " << (decrypt_config_ != NULL);
+ << " encrypted: " << (decrypt_config_ != NULL)
+ << " discard_padding (ms): " << discard_padding_.InMilliseconds();
return s.str();
}
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 6cf519f4c1d..393e586d06b 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -105,6 +105,16 @@ class MEDIA_EXPORT DecoderBuffer
return side_data_size_;
}
+ base::TimeDelta discard_padding() const {
+ DCHECK(!end_of_stream());
+ return discard_padding_;
+ }
+
+ void set_discard_padding(const base::TimeDelta discard_padding) {
+ DCHECK(!end_of_stream());
+ discard_padding_ = discard_padding;
+ }
+
const DecryptConfig* decrypt_config() const {
DCHECK(!end_of_stream());
return decrypt_config_.get();
@@ -142,6 +152,7 @@ class MEDIA_EXPORT DecoderBuffer
int side_data_size_;
scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
scoped_ptr<DecryptConfig> decrypt_config_;
+ base::TimeDelta discard_padding_;
// Constructor helper method for memory allocations.
void Initialize();
diff --git a/chromium/media/base/demuxer.h b/chromium/media/base/demuxer.h
index 6a91aab896c..853a21a2a75 100644
--- a/chromium/media/base/demuxer.h
+++ b/chromium/media/base/demuxer.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_BASE_DEMUXER_H_
#define MEDIA_BASE_DEMUXER_H_
+#include <vector>
+
#include "base/time/time.h"
#include "media/base/data_source.h"
#include "media/base/demuxer_stream.h"
@@ -29,6 +31,12 @@ class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
class MEDIA_EXPORT Demuxer {
public:
+ // A new potentially encrypted stream has been parsed.
+ // First parameter - The type of initialization data.
+ // Second parameter - The initialization data associated with the stream.
+ typedef base::Callback<void(const std::string& type,
+ const std::vector<uint8>& init_data)> NeedKeyCB;
+
Demuxer();
virtual ~Demuxer();
@@ -47,8 +55,10 @@ class MEDIA_EXPORT Demuxer {
// callback upon completion.
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& status_cb);
- // The pipeline is being stopped either as a result of an error or because
- // the client called Stop().
+ // Starts stopping this demuxer, executing the callback upon completion.
+ //
+ // After the callback completes the demuxer may be destroyed. It is illegal to
+ // call any method (including Stop()) after a demuxer has stopped.
virtual void Stop(const base::Closure& callback);
// This method is called from the pipeline when the audio renderer
diff --git a/chromium/media/base/keyboard_event_counter.cc b/chromium/media/base/keyboard_event_counter.cc
new file mode 100644
index 00000000000..8432aec37e3
--- /dev/null
+++ b/chromium/media/base/keyboard_event_counter.cc
@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/keyboard_event_counter.h"
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+
+namespace media {
+
+KeyboardEventCounter::KeyboardEventCounter() : total_key_presses_(0) {}
+
+KeyboardEventCounter::~KeyboardEventCounter() {}
+
+void KeyboardEventCounter::Reset() {
+ pressed_keys_.clear();
+ base::subtle::NoBarrier_Store(
+ reinterpret_cast<base::subtle::AtomicWord*>(&total_key_presses_), 0);
+}
+
+void KeyboardEventCounter::OnKeyboardEvent(ui::EventType event,
+ ui::KeyboardCode key_code) {
+ // Updates the pressed keys and the total count of key presses.
+ if (event == ui::ET_KEY_PRESSED) {
+ if (pressed_keys_.find(key_code) != pressed_keys_.end())
+ return;
+ pressed_keys_.insert(key_code);
+ base::subtle::NoBarrier_AtomicIncrement(
+ reinterpret_cast<base::subtle::AtomicWord*>(&total_key_presses_), 1);
+ } else {
+ DCHECK_EQ(ui::ET_KEY_RELEASED, event);
+ pressed_keys_.erase(key_code);
+ }
+}
+
+size_t KeyboardEventCounter::GetKeyPressCount() const {
+ return base::subtle::NoBarrier_Load(
+ reinterpret_cast<const base::subtle::AtomicWord*>(&total_key_presses_));
+}
+
+} // namespace media
diff --git a/chromium/media/base/keyboard_event_counter.h b/chromium/media/base/keyboard_event_counter.h
new file mode 100644
index 00000000000..accb659410e
--- /dev/null
+++ b/chromium/media/base/keyboard_event_counter.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_KEYBOARD_EVENT_COUNTER_H_
+#define MEDIA_BASE_KEYBOARD_EVENT_COUNTER_H_
+
+#include <set>
+
+#include "base/synchronization/lock.h"
+#include "media/base/media_export.h"
+#include "ui/events/event_constants.h"
+#include "ui/events/keycodes/keyboard_codes.h"
+
+namespace media {
+
+// This class tracks the total number of keypresses based on the OnKeyboardEvent
+// calls it receives from the client.
+// Multiple key down events for the same key are counted as one keypress until
+// the same key is released.
+class MEDIA_EXPORT KeyboardEventCounter {
+ public:
+ KeyboardEventCounter();
+ ~KeyboardEventCounter();
+
+ // Resets the count to 0. Must be called on the same thread as
+ // OnKeyboardEvent.
+ void Reset();
+
+ // Returns the total number of keypresses since its creation or last Reset()
+ // call. Can be called on any thread.
+ size_t GetKeyPressCount() const;
+
+ // The client should call this method on key down or key up events.
+ // Must be called on a single thread.
+ void OnKeyboardEvent(ui::EventType event, ui::KeyboardCode key_code);
+
+ private:
+ // The set of keys currently held down.
+ std::set<ui::KeyboardCode> pressed_keys_;
+
+ size_t total_key_presses_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyboardEventCounter);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_KEYBOARD_EVENT_COUNTER_H_
diff --git a/chromium/media/base/media.cc b/chromium/media/base/media.cc
index e1bb4b2c102..75625fe5f36 100644
--- a/chromium/media/base/media.cc
+++ b/chromium/media/base/media.cc
@@ -46,10 +46,8 @@ class MediaInitializer {
// Perform initialization of libraries which require runtime CPU detection.
// TODO(dalecurtis): Add initialization of YUV, SincResampler.
vector_math::Initialize();
-#if !defined(OS_IOS)
SincResampler::InitializeCPUSpecificFeatures();
InitializeCPUSpecificYUVConversions();
-#endif
}
~MediaInitializer() {
diff --git a/chromium/media/base/media_file_checker_unittest.cc b/chromium/media/base/media_file_checker_unittest.cc
index f43c846776c..ec61edf3e64 100644
--- a/chromium/media/base/media_file_checker_unittest.cc
+++ b/chromium/media/base/media_file_checker_unittest.cc
@@ -39,7 +39,7 @@ TEST(MediaFileCheckerTest, Audio) {
RunMediaFileChecker("sfx.ogg", true);
}
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
TEST(MediaFileCheckerTest, MP3) {
RunMediaFileChecker("sfx.mp3", true);
}
diff --git a/chromium/media/base/media_keys.h b/chromium/media/base/media_keys.h
index 482248394fd..9369c50b563 100644
--- a/chromium/media/base/media_keys.h
+++ b/chromium/media/base/media_keys.h
@@ -29,9 +29,9 @@ class MEDIA_EXPORT MediaKeys {
enum KeyError {
kUnknownError = 1,
kClientError,
- // The following v0.1b values have never been used.
+ // The commented v0.1b values below have never been used.
// kServiceError,
- // kOutputError,
+ kOutputError = 4,
// kHardwareChangeError,
// kDomainError,
kMaxKeyError // Must be last and greater than any legit value.
@@ -82,8 +82,7 @@ typedef base::Callback<void(const std::string& session_id,
typedef base::Callback<void(const std::string& session_id,
const std::string& type,
- scoped_ptr<uint8[]> init_data,
- int init_data_size)> NeedKeyCB;
+ const std::vector<uint8>& init_data)> NeedKeyCB;
} // namespace media
diff --git a/chromium/media/base/media_stub.cc b/chromium/media/base/media_stub.cc
index 9efb37e2a0f..e3e02e40d60 100644
--- a/chromium/media/base/media_stub.cc
+++ b/chromium/media/base/media_stub.cc
@@ -7,7 +7,7 @@
#include "base/files/file_path.h"
// This file is intended for platforms that don't need to load any media
-// libraries (e.g., iOS).
+// libraries (e.g., Android).
namespace media {
namespace internal {
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index 2ebf5dfc7bf..c295a0d991d 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -15,8 +15,8 @@ const char kEnableEac3Playback[] = "enable-eac3-playback";
// Enables Opus playback in media elements.
const char kEnableOpusPlayback[] = "enable-opus-playback";
-// Enables VP8 Alpha playback in media elements.
-const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
+// Disables VP8 Alpha playback in media elements.
+const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
@@ -25,6 +25,22 @@ const char kVideoThreads[] = "video-threads";
const char kOverrideEncryptedMediaCanPlayType[] =
"override-encrypted-media-canplaytype";
+// Enables MP3 stream parser for Media Source Extensions.
+const char kEnableMP3StreamParser[] = "enable-mp3-stream-parser";
+
+#if defined(OS_ANDROID)
+// Disables the infobar popup for accessing protected media identifier.
+const char kDisableInfobarForProtectedMediaIdentifier[] =
+ "disable-infobar-for-protected-media-identifier";
+
+// Enables use of MediaDrm for Encrypted Media Extensions implementation.
+const char kEnableMediaDrm[] = "enable-mediadrm";
+
+// Enables use of non-compositing MediaDrm decoding by default for Encrypted
+// Media Extensions implementation.
+const char kMediaDrmEnableNonCompositing[] = "mediadrm-enable-non-compositing";
+#endif
+
#if defined(GOOGLE_TV)
// Use external video surface for video with more than or equal pixels to
// specified value. For example, value of 0 will enable external video surface
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index e6c1de02fe4..963a351cba8 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -18,12 +18,20 @@ MEDIA_EXPORT extern const char kEnableEac3Playback[];
MEDIA_EXPORT extern const char kEnableOpusPlayback[];
-MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
+MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
MEDIA_EXPORT extern const char kVideoThreads[];
MEDIA_EXPORT extern const char kOverrideEncryptedMediaCanPlayType[];
+MEDIA_EXPORT extern const char kEnableMP3StreamParser[];
+
+#if defined(OS_ANDROID)
+MEDIA_EXPORT extern const char kDisableInfobarForProtectedMediaIdentifier[];
+MEDIA_EXPORT extern const char kEnableMediaDrm[];
+MEDIA_EXPORT extern const char kMediaDrmEnableNonCompositing[];
+#endif
+
#if defined(GOOGLE_TV)
MEDIA_EXPORT extern const char kUseExternalVideoSurfaceThresholdInPixels[];
#endif
diff --git a/chromium/media/base/pipeline.cc b/chromium/media/base/pipeline.cc
index ccac81991c4..9790c61cb5c 100644
--- a/chromium/media/base/pipeline.cc
+++ b/chromium/media/base/pipeline.cc
@@ -723,10 +723,14 @@ void Pipeline::StopTask(const base::Closure& stop_cb) {
return;
}
- SetState(kStopping);
- pending_callbacks_.reset();
stop_cb_ = stop_cb;
+ // We may already be stopping due to a runtime error.
+ if (state_ == kStopping)
+ return;
+
+ SetState(kStopping);
+ pending_callbacks_.reset();
DoStop(base::Bind(&Pipeline::OnStopCompleted, base::Unretained(this)));
}
diff --git a/chromium/media/base/pipeline_unittest.cc b/chromium/media/base/pipeline_unittest.cc
index 90c616f92db..4c8640c7807 100644
--- a/chromium/media/base/pipeline_unittest.cc
+++ b/chromium/media/base/pipeline_unittest.cc
@@ -871,6 +871,7 @@ class PipelineTeardownTest : public PipelineTest {
enum StopOrError {
kStop,
kError,
+ kErrorAndStop,
};
PipelineTeardownTest() {}
@@ -1136,13 +1137,24 @@ class PipelineTeardownTest : public PipelineTest {
EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
- if (stop_or_error == kStop) {
- EXPECT_CALL(callbacks_, OnStop());
- pipeline_->Stop(base::Bind(
- &CallbackHelper::OnStop, base::Unretained(&callbacks_)));
- } else {
- EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_READ));
- pipeline_->SetErrorForTesting(PIPELINE_ERROR_READ);
+ switch (stop_or_error) {
+ case kStop:
+ EXPECT_CALL(callbacks_, OnStop());
+ pipeline_->Stop(base::Bind(
+ &CallbackHelper::OnStop, base::Unretained(&callbacks_)));
+ break;
+
+ case kError:
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_READ));
+ pipeline_->SetErrorForTesting(PIPELINE_ERROR_READ);
+ break;
+
+ case kErrorAndStop:
+ EXPECT_CALL(callbacks_, OnStop());
+ pipeline_->SetErrorForTesting(PIPELINE_ERROR_READ);
+ pipeline_->Stop(base::Bind(
+ &CallbackHelper::OnStop, base::Unretained(&callbacks_)));
+ break;
}
message_loop_.RunUntilIdle();
@@ -1176,4 +1188,6 @@ INSTANTIATE_TEARDOWN_TEST(Error, Prerolling);
INSTANTIATE_TEARDOWN_TEST(Error, Starting);
INSTANTIATE_TEARDOWN_TEST(Error, Playing);
+INSTANTIATE_TEARDOWN_TEST(ErrorAndStop, Playing);
+
} // namespace media
diff --git a/chromium/media/base/run_all_unittests.cc b/chromium/media/base/run_all_unittests.cc
index 28ef5c68f7a..1c4da930470 100644
--- a/chromium/media/base/run_all_unittests.cc
+++ b/chromium/media/base/run_all_unittests.cc
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/test/test_suite.h"
+#include "base/test/unit_test_launcher.h"
#include "build/build_config.h"
#include "media/base/media.h"
#include "media/base/media_switches.h"
@@ -37,13 +39,18 @@ void TestSuiteNoAtExit::Initialize() {
// Run this here instead of main() to ensure an AtExitManager is already
// present.
media::InitializeMediaLibraryForTesting();
- // Enable VP8 alpha support for all media tests.
- // TODO(tomfinegan): Remove this once the VP8 alpha flag is removed or
- // negated.
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- cmd_line->AppendSwitch(switches::kEnableVp8AlphaPlayback);
+ cmd_line->AppendSwitch(switches::kEnableMP3StreamParser);
+
+ // Enable Opus support for all media tests.
+ // TODO(vigneshv): Remove this once the Opus flag is removed or negated.
+ cmd_line->AppendSwitch(switches::kEnableOpusPlayback);
}
int main(int argc, char** argv) {
- return TestSuiteNoAtExit(argc, argv).Run();
+ TestSuiteNoAtExit test_suite(argc, argv);
+
+ return base::LaunchUnitTests(
+ argc, argv, base::Bind(&TestSuiteNoAtExit::Run,
+ base::Unretained(&test_suite)));
}
diff --git a/chromium/media/base/scoped_histogram_timer_unittest.cc b/chromium/media/base/scoped_histogram_timer_unittest.cc
index b8893f9713b..47e228eb111 100644
--- a/chromium/media/base/scoped_histogram_timer_unittest.cc
+++ b/chromium/media/base/scoped_histogram_timer_unittest.cc
@@ -13,4 +13,4 @@ TEST(ScopedHistogramTimer, TwoTimersOneScope) {
SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/base/serial_runner.cc b/chromium/media/base/serial_runner.cc
index fa391331467..dfc4a0b9fc3 100644
--- a/chromium/media/base/serial_runner.cc
+++ b/chromium/media/base/serial_runner.cc
@@ -59,9 +59,15 @@ SerialRunner::SerialRunner(
message_loop_(base::MessageLoopProxy::current()),
bound_fns_(bound_fns),
done_cb_(done_cb) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr(),
- PIPELINE_OK));
+ // Respect both cancellation and calling stack guarantees for |done_cb|
+ // when empty.
+ if (bound_fns_.empty()) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr(), PIPELINE_OK));
+ return;
+ }
+
+ RunNextInSeries(PIPELINE_OK);
}
SerialRunner::~SerialRunner() {}
diff --git a/chromium/media/base/serial_runner.h b/chromium/media/base/serial_runner.h
index a59c7753c9c..eaae625cd43 100644
--- a/chromium/media/base/serial_runner.h
+++ b/chromium/media/base/serial_runner.h
@@ -11,6 +11,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
+#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
namespace base {
@@ -22,13 +23,13 @@ namespace media {
// Runs a series of bound functions accepting Closures or PipelineStatusCB.
// SerialRunner doesn't use regular Closure/PipelineStatusCBs as it late binds
// the completion callback as the series progresses.
-class SerialRunner {
+class MEDIA_EXPORT SerialRunner {
public:
typedef base::Callback<void(const base::Closure&)> BoundClosure;
typedef base::Callback<void(const PipelineStatusCB&)> BoundPipelineStatusCB;
// Serial queue of bound functions to run.
- class Queue {
+ class MEDIA_EXPORT Queue {
public:
Queue();
~Queue();
@@ -50,6 +51,13 @@ class SerialRunner {
// All bound functions are executed on the thread that Run() is called on,
// including |done_cb|.
//
+ // To eliminate an unnecessary posted task, the first function is executed
+ // immediately on the caller's stack. It is *strongly advised* to ensure
+ // the calling code does no more work after the call to Run().
+ //
+ // In all cases, |done_cb| is guaranteed to execute on a separate calling
+ // stack.
+ //
// Deleting the object will prevent execution of any unstarted bound
// functions, including |done_cb|.
static scoped_ptr<SerialRunner> Run(
diff --git a/chromium/media/base/serial_runner_unittest.cc b/chromium/media/base/serial_runner_unittest.cc
new file mode 100644
index 00000000000..6d21968c0a4
--- /dev/null
+++ b/chromium/media/base/serial_runner_unittest.cc
@@ -0,0 +1,176 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/debug/stack_trace.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/serial_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class SerialRunnerTest : public ::testing::Test {
+ public:
+ SerialRunnerTest()
+ : inside_start_(false), done_called_(false), done_status_(PIPELINE_OK) {}
+ virtual ~SerialRunnerTest() {}
+
+ void RunSerialRunner() {
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &SerialRunnerTest::StartRunnerInternal, base::Unretained(this),
+ bound_fns_));
+ message_loop_.RunUntilIdle();
+ }
+
+ // Pushes a bound function to the queue that will run its callback with
+ // |status|. called(i) returns whether the i'th bound function pushed to the
+ // queue was called while running the SerialRunner.
+ void PushBoundFunction(PipelineStatus status) {
+ bound_fns_.Push(base::Bind(&SerialRunnerTest::RunBoundFunction,
+ base::Unretained(this),
+ status,
+ called_.size()));
+ called_.push_back(false);
+ }
+
+ // Push a bound function to the queue that will delete the SerialRunner,
+ // which should cancel all remaining queued work.
+ void PushCancellation() {
+ bound_fns_.Push(base::Bind(&SerialRunnerTest::CancelSerialRunner,
+ base::Unretained(this)));
+ }
+
+ // Queries final status of pushed functions and done callback. Valid only
+ // after calling RunSerialRunner().
+ bool called(size_t index) { return called_[index]; }
+ bool done_called() { return done_called_; }
+ PipelineStatus done_status() { return done_status_; }
+
+ private:
+ void RunBoundFunction(PipelineStatus status,
+ size_t index,
+ const PipelineStatusCB& status_cb) {
+ EXPECT_EQ(index == 0u, inside_start_)
+ << "First bound function should run on same stack as "
+ << "SerialRunner::Run() while all others should not\n"
+ << base::debug::StackTrace().ToString();
+
+ called_[index] = true;
+ status_cb.Run(status);
+ }
+
+ void StartRunnerInternal(const SerialRunner::Queue& bound_fns) {
+ inside_start_ = true;
+ runner_ = SerialRunner::Run(bound_fns_, base::Bind(
+ &SerialRunnerTest::DoneCallback, base::Unretained(this)));
+ inside_start_ = false;
+ }
+
+ void DoneCallback(PipelineStatus status) {
+ EXPECT_FALSE(inside_start_)
+ << "Done callback should not run on same stack as SerialRunner::Run()\n"
+ << base::debug::StackTrace().ToString();
+
+ done_called_ = true;
+ done_status_ = status;
+ message_loop_.QuitWhenIdle();
+ }
+
+ void CancelSerialRunner(const PipelineStatusCB& status_cb) {
+ // Tasks run by |runner_| shouldn't reset it, hence we post a task to do so.
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &SerialRunnerTest::ResetSerialRunner, base::Unretained(this)));
+ status_cb.Run(PIPELINE_OK);
+ }
+
+ void ResetSerialRunner() {
+ runner_.reset();
+ }
+
+ base::MessageLoop message_loop_;
+ SerialRunner::Queue bound_fns_;
+ scoped_ptr<SerialRunner> runner_;
+
+ // Used to enforce calling stack guarantees of the API.
+ bool inside_start_;
+
+ // Tracks whether the i'th bound function was called.
+ std::vector<bool> called_;
+
+ // Tracks whether the final done callback was called + resulting status.
+ bool done_called_;
+ PipelineStatus done_status_;
+
+ DISALLOW_COPY_AND_ASSIGN(SerialRunnerTest);
+};
+
+TEST_F(SerialRunnerTest, Empty) {
+ RunSerialRunner();
+
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_OK, done_status());
+}
+
+TEST_F(SerialRunnerTest, Single) {
+ PushBoundFunction(PIPELINE_OK);
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_OK, done_status());
+}
+
+TEST_F(SerialRunnerTest, Single_Error) {
+ PushBoundFunction(PIPELINE_ERROR_ABORT);
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_ERROR_ABORT, done_status());
+}
+
+TEST_F(SerialRunnerTest, Single_Cancel) {
+ PushBoundFunction(PIPELINE_OK);
+ PushCancellation();
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_FALSE(done_called());
+}
+
+TEST_F(SerialRunnerTest, Multiple) {
+ PushBoundFunction(PIPELINE_OK);
+ PushBoundFunction(PIPELINE_OK);
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_TRUE(called(1));
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_OK, done_status());
+}
+
+TEST_F(SerialRunnerTest, Multiple_Error) {
+ PushBoundFunction(PIPELINE_ERROR_ABORT);
+ PushBoundFunction(PIPELINE_OK);
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_FALSE(called(1)); // A bad status cancels remaining work.
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_ERROR_ABORT, done_status());
+}
+
+TEST_F(SerialRunnerTest, Multiple_Cancel) {
+ PushBoundFunction(PIPELINE_OK);
+ PushCancellation();
+ PushBoundFunction(PIPELINE_OK);
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_FALSE(called(1));
+ EXPECT_FALSE(done_called());
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
index f99a2fef840..124671c0c03 100644
--- a/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
@@ -21,6 +21,18 @@ namespace media {
// Define a convenient macro to do static cast.
#define INT16_FIX(x) static_cast<int16>(FIX(x))
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
+ INT16_FIX(0.257), INT16_FIX(0.504), INT16_FIX(0.098), 0,
+ INT16_FIX(0.257), INT16_FIX(0.504), INT16_FIX(0.098), 0,
+ -INT16_FIX(0.148), -INT16_FIX(0.291), INT16_FIX(0.439), 0,
+ -INT16_FIX(0.148), -INT16_FIX(0.291), INT16_FIX(0.439), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.368), -INT16_FIX(0.071), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.368), -INT16_FIX(0.071), 0,
+};
+#else
SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
@@ -29,6 +41,7 @@ SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
-INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
-INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
};
+#endif
#undef INT16_FIX
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
index b8ebd1eeb12..0466112918d 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -20,31 +20,39 @@ namespace media {
#define SK_G32_SHIFT 8
#define SK_B32_SHIFT 16
#define SK_A32_SHIFT 24
+#define R_INDEX 0
+#define G_INDEX 1
+#define B_INDEX 2
+#define A_INDEX 3
#else
#define SK_B32_SHIFT 0
#define SK_G32_SHIFT 8
#define SK_R32_SHIFT 16
#define SK_A32_SHIFT 24
+#define B_INDEX 0
+#define G_INDEX 1
+#define R_INDEX 2
+#define A_INDEX 3
#endif
static inline void ConvertYUVToRGB32_C(uint8 y,
uint8 u,
uint8 v,
uint8* rgb_buf) {
- int b = kCoefficientsRgbY[256+u][0];
- int g = kCoefficientsRgbY[256+u][1];
- int r = kCoefficientsRgbY[256+u][2];
- int a = kCoefficientsRgbY[256+u][3];
+ int b = kCoefficientsRgbY[256+u][B_INDEX];
+ int g = kCoefficientsRgbY[256+u][G_INDEX];
+ int r = kCoefficientsRgbY[256+u][R_INDEX];
+ int a = kCoefficientsRgbY[256+u][A_INDEX];
- b = paddsw(b, kCoefficientsRgbY[512+v][0]);
- g = paddsw(g, kCoefficientsRgbY[512+v][1]);
- r = paddsw(r, kCoefficientsRgbY[512+v][2]);
- a = paddsw(a, kCoefficientsRgbY[512+v][3]);
+ b = paddsw(b, kCoefficientsRgbY[512+v][B_INDEX]);
+ g = paddsw(g, kCoefficientsRgbY[512+v][G_INDEX]);
+ r = paddsw(r, kCoefficientsRgbY[512+v][R_INDEX]);
+ a = paddsw(a, kCoefficientsRgbY[512+v][A_INDEX]);
- b = paddsw(b, kCoefficientsRgbY[y][0]);
- g = paddsw(g, kCoefficientsRgbY[y][1]);
- r = paddsw(r, kCoefficientsRgbY[y][2]);
- a = paddsw(a, kCoefficientsRgbY[y][3]);
+ b = paddsw(b, kCoefficientsRgbY[y][B_INDEX]);
+ g = paddsw(g, kCoefficientsRgbY[y][G_INDEX]);
+ r = paddsw(r, kCoefficientsRgbY[y][R_INDEX]);
+ a = paddsw(a, kCoefficientsRgbY[y][A_INDEX]);
b >>= 6;
g >>= 6;
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.cc b/chromium/media/base/simd/yuv_to_rgb_table.cc
index 00735655f5f..253280da951 100644
--- a/chromium/media/base/simd/yuv_to_rgb_table.cc
+++ b/chromium/media/base/simd/yuv_to_rgb_table.cc
@@ -17,20 +17,42 @@ extern "C" {
// Defines the R,G,B,A contributions from U.
// The contribution to A is the same for any value of U
// causing the final A value to be 255 in every conversion.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBU(i) { \
+ 0, \
+ static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(256 * 64 - 1) \
+}
+#else
#define RGBU(i) { \
static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
0, \
static_cast<int16>(256 * 64 - 1) \
}
+#endif
// Defines the R,G,B,A contributions from V.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBV(i) { \
+ static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
+ 0, \
+ 0 \
+}
+#else
#define RGBV(i) { \
0, \
static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
0 \
}
+#endif
// Used to define a set of multiplier words for each alpha level.
#define ALPHA(i) { \
diff --git a/chromium/media/base/sinc_resampler.cc b/chromium/media/base/sinc_resampler.cc
index a2918c3f0d2..5566f64ce86 100644
--- a/chromium/media/base/sinc_resampler.cc
+++ b/chromium/media/base/sinc_resampler.cc
@@ -108,9 +108,8 @@ static double SincScaleFactor(double io_ratio) {
// If we know the minimum architecture at compile time, avoid CPU detection.
// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial. iOS lies
-// about its architecture, so we also need to exclude it here.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+// methods and plumbing the -msse built library is non-trivial.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
#if defined(__SSE__)
#define CONVOLVE_FUNC Convolve_SSE
void SincResampler::InitializeCPUSpecificFeatures() {}
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
index a0fbb71a924..33a336def8b 100644
--- a/chromium/media/base/stream_parser.h
+++ b/chromium/media/base/stream_parser.h
@@ -73,9 +73,8 @@ class MEDIA_EXPORT StreamParser {
// First parameter - The type of the initialization data associated with the
// stream.
// Second parameter - The initialization data associated with the stream.
- // Third parameter - Number of bytes of the initialization data.
typedef base::Callback<void(const std::string&,
- scoped_ptr<uint8[]>, int)> NeedKeyCB;
+ const std::vector<uint8>&)> NeedKeyCB;
// Initialize the parser with necessary callbacks. Must be called before any
// data is passed to Parse(). |init_cb| will be called once enough data has
diff --git a/chromium/media/base/user_input_monitor.cc b/chromium/media/base/user_input_monitor.cc
new file mode 100644
index 00000000000..e43cd626a8f
--- /dev/null
+++ b/chromium/media/base/user_input_monitor.cc
@@ -0,0 +1,74 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/user_input_monitor.h"
+
+#include "base/logging.h"
+#include "third_party/skia/include/core/SkPoint.h"
+
+namespace media {
+
+#ifdef DISABLE_USER_INPUT_MONITOR
+scoped_ptr<UserInputMonitor> UserInputMonitor::Create(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner) {
+ return scoped_ptr<UserInputMonitor>();
+}
+#endif // DISABLE_USER_INPUT_MONITOR
+
+UserInputMonitor::UserInputMonitor()
+ : key_press_counter_references_(0),
+ mouse_listeners_count_(0),
+ mouse_listeners_(new MouseListenerList()) {}
+
+UserInputMonitor::~UserInputMonitor() {
+ DCHECK_EQ(0u, key_press_counter_references_);
+ mouse_listeners_->AssertEmpty();
+}
+
+void UserInputMonitor::AddMouseListener(MouseEventListener* listener) {
+ mouse_listeners_->AddObserver(listener);
+ {
+ base::AutoLock auto_lock(lock_);
+ mouse_listeners_count_++;
+ if (mouse_listeners_count_ == 1) {
+ StartMouseMonitoring();
+ DVLOG(2) << "Started mouse monitoring.";
+ }
+ }
+}
+
+void UserInputMonitor::RemoveMouseListener(MouseEventListener* listener) {
+ mouse_listeners_->RemoveObserver(listener);
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(mouse_listeners_count_, 0u);
+ mouse_listeners_count_--;
+ if (mouse_listeners_count_ == 0) {
+ StopMouseMonitoring();
+ DVLOG(2) << "Stopped mouse monitoring.";
+ }
+ }
+}
+
+void UserInputMonitor::EnableKeyPressMonitoring() {
+ base::AutoLock auto_lock(lock_);
+ ++key_press_counter_references_;
+ if (key_press_counter_references_ == 1) {
+ StartKeyboardMonitoring();
+ DVLOG(2) << "Started keyboard monitoring.";
+ }
+}
+
+void UserInputMonitor::DisableKeyPressMonitoring() {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(key_press_counter_references_, 0u);
+ --key_press_counter_references_;
+ if (key_press_counter_references_ == 0) {
+ StopKeyboardMonitoring();
+ DVLOG(2) << "Stopped keyboard monitoring.";
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/user_input_monitor.h b/chromium/media/base/user_input_monitor.h
new file mode 100644
index 00000000000..ab572694ed3
--- /dev/null
+++ b/chromium/media/base/user_input_monitor.h
@@ -0,0 +1,89 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_USER_INPUT_MONITOR_H_
+#define MEDIA_BASE_USER_INPUT_MONITOR_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/synchronization/lock.h"
+#include "media/base/media_export.h"
+
+struct SkIPoint;
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+// Monitors and notifies about mouse movements and keyboard events.
+// Thread safe. The listeners are called on the thread where the listeners are
+// added.
+class MEDIA_EXPORT UserInputMonitor {
+ public:
+ // The interface to receive mouse movement events.
+ class MEDIA_EXPORT MouseEventListener {
+ public:
+ // |position| is the new mouse position.
+ virtual void OnMouseMoved(const SkIPoint& position) = 0;
+
+ protected:
+ virtual ~MouseEventListener() {}
+ };
+ typedef ObserverListThreadSafe<UserInputMonitor::MouseEventListener>
+ MouseListenerList;
+
+ UserInputMonitor();
+ virtual ~UserInputMonitor();
+
+ // Creates a platform-specific instance of UserInputMonitor.
+ // |io_task_runner| is the task runner for an IO thread.
+ // |ui_task_runner| is the task runner for a UI thread.
+ static scoped_ptr<UserInputMonitor> Create(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner);
+
+ // The same |listener| should only be added once.
+ // The clients should make sure to call Remove*Listener before |listener| is
+ // destroyed.
+ void AddMouseListener(MouseEventListener* listener);
+ void RemoveMouseListener(MouseEventListener* listener);
+
+ // A caller must call EnableKeyPressMonitoring and
+ // DisableKeyPressMonitoring in pair.
+ void EnableKeyPressMonitoring();
+ void DisableKeyPressMonitoring();
+
+ // Returns the number of keypresses. The starting point from when it is
+ // counted is not guaranteed, but consistent within the pair of calls of
+ // EnableKeyPressMonitoring and DisableKeyPressMonitoring. So a caller can
+ // use the difference between the values returned at two times to get the
+ // number of keypresses happened within that time period, but should not make
+ // any assumption on the initial value.
+ virtual size_t GetKeyPressCount() const = 0;
+
+ protected:
+ scoped_refptr<MouseListenerList> mouse_listeners() {
+ return mouse_listeners_;
+ }
+
+ private:
+ virtual void StartKeyboardMonitoring() = 0;
+ virtual void StopKeyboardMonitoring() = 0;
+ virtual void StartMouseMonitoring() = 0;
+ virtual void StopMouseMonitoring() = 0;
+
+ base::Lock lock_;
+ size_t key_press_counter_references_;
+ size_t mouse_listeners_count_;
+ scoped_refptr<MouseListenerList> mouse_listeners_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitor);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_USER_INPUT_MONITOR_H_
diff --git a/chromium/media/base/user_input_monitor_linux.cc b/chromium/media/base/user_input_monitor_linux.cc
new file mode 100644
index 00000000000..b5dbbe5e0bb
--- /dev/null
+++ b/chromium/media/base/user_input_monitor_linux.cc
@@ -0,0 +1,362 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/user_input_monitor.h"
+
+#include <sys/select.h>
+#include <unistd.h>
+#define XK_MISCELLANY
+#include <X11/keysymdef.h>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_libevent.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "media/base/keyboard_event_counter.h"
+#include "third_party/skia/include/core/SkPoint.h"
+#include "ui/events/keycodes/keyboard_code_conversion_x.h"
+
+// These includes need to be later than dictated by the style guide due to
+// Xlib header pollution, specifically the min, max, and Status macros.
+#include <X11/XKBlib.h>
+#include <X11/Xlibint.h>
+#include <X11/extensions/record.h>
+
+namespace media {
+namespace {
+
+// This is the actual implementation of event monitoring. It's separated from
+// UserInputMonitorLinux since it needs to be deleted on the IO thread.
+class UserInputMonitorLinuxCore
+ : public base::MessagePumpLibevent::Watcher,
+ public base::SupportsWeakPtr<UserInputMonitorLinuxCore> {
+ public:
+ enum EventType {
+ MOUSE_EVENT,
+ KEYBOARD_EVENT
+ };
+
+ explicit UserInputMonitorLinuxCore(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<UserInputMonitor::MouseListenerList>&
+ mouse_listeners);
+ virtual ~UserInputMonitorLinuxCore();
+
+ size_t GetKeyPressCount() const;
+ void StartMonitor(EventType type);
+ void StopMonitor(EventType type);
+
+ private:
+ // base::MessagePumpLibevent::Watcher interface.
+ virtual void OnFileCanReadWithoutBlocking(int fd) OVERRIDE;
+ virtual void OnFileCanWriteWithoutBlocking(int fd) OVERRIDE;
+
+ // Processes key and mouse events.
+ void ProcessXEvent(xEvent* event);
+ static void ProcessReply(XPointer self, XRecordInterceptData* data);
+
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ scoped_refptr<ObserverListThreadSafe<UserInputMonitor::MouseEventListener> >
+ mouse_listeners_;
+
+ //
+ // The following members should only be accessed on the IO thread.
+ //
+ base::MessagePumpLibevent::FileDescriptorWatcher controller_;
+ Display* x_control_display_;
+ Display* x_record_display_;
+ XRecordRange* x_record_range_[2];
+ XRecordContext x_record_context_;
+ KeyboardEventCounter counter_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitorLinuxCore);
+};
+
+class UserInputMonitorLinux : public UserInputMonitor {
+ public:
+ explicit UserInputMonitorLinux(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ virtual ~UserInputMonitorLinux();
+
+ // Public UserInputMonitor overrides.
+ virtual size_t GetKeyPressCount() const OVERRIDE;
+
+ private:
+ // Private UserInputMonitor overrides.
+ virtual void StartKeyboardMonitoring() OVERRIDE;
+ virtual void StopKeyboardMonitoring() OVERRIDE;
+ virtual void StartMouseMonitoring() OVERRIDE;
+ virtual void StopMouseMonitoring() OVERRIDE;
+
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ UserInputMonitorLinuxCore* core_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitorLinux);
+};
+
+UserInputMonitorLinuxCore::UserInputMonitorLinuxCore(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<UserInputMonitor::MouseListenerList>& mouse_listeners)
+ : io_task_runner_(io_task_runner),
+ mouse_listeners_(mouse_listeners),
+ x_control_display_(NULL),
+ x_record_display_(NULL),
+ x_record_context_(0) {
+ x_record_range_[0] = NULL;
+ x_record_range_[1] = NULL;
+}
+
+UserInputMonitorLinuxCore::~UserInputMonitorLinuxCore() {
+ DCHECK(!x_control_display_);
+ DCHECK(!x_record_display_);
+ DCHECK(!x_record_range_[0]);
+ DCHECK(!x_record_range_[1]);
+ DCHECK(!x_record_context_);
+}
+
+size_t UserInputMonitorLinuxCore::GetKeyPressCount() const {
+ return counter_.GetKeyPressCount();
+}
+
+void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+
+ if (type == KEYBOARD_EVENT)
+ counter_.Reset();
+
+ // TODO(jamiewalch): We should pass the display in. At that point, since
+ // XRecord needs a private connection to the X Server for its data channel
+ // and both channels are used from a separate thread, we'll need to duplicate
+ // them with something like the following:
+ // XOpenDisplay(DisplayString(display));
+ if (!x_control_display_)
+ x_control_display_ = XOpenDisplay(NULL);
+
+ if (!x_record_display_)
+ x_record_display_ = XOpenDisplay(NULL);
+
+ if (!x_control_display_ || !x_record_display_) {
+ LOG(ERROR) << "Couldn't open X display";
+ return;
+ }
+
+ int xr_opcode, xr_event, xr_error;
+ if (!XQueryExtension(
+ x_control_display_, "RECORD", &xr_opcode, &xr_event, &xr_error)) {
+ LOG(ERROR) << "X Record extension not available.";
+ return;
+ }
+
+ if (!x_record_range_[type])
+ x_record_range_[type] = XRecordAllocRange();
+
+ if (!x_record_range_[type]) {
+ LOG(ERROR) << "XRecordAllocRange failed.";
+ return;
+ }
+
+ if (type == MOUSE_EVENT) {
+ x_record_range_[type]->device_events.first = MotionNotify;
+ x_record_range_[type]->device_events.last = MotionNotify;
+ } else {
+ DCHECK_EQ(KEYBOARD_EVENT, type);
+ x_record_range_[type]->device_events.first = KeyPress;
+ x_record_range_[type]->device_events.last = KeyRelease;
+ }
+
+ if (x_record_context_) {
+ XRecordDisableContext(x_control_display_, x_record_context_);
+ XFlush(x_control_display_);
+ XRecordFreeContext(x_record_display_, x_record_context_);
+ x_record_context_ = 0;
+ }
+ XRecordRange** record_range_to_use =
+ (x_record_range_[0] && x_record_range_[1]) ? x_record_range_
+ : &x_record_range_[type];
+ int number_of_ranges = (x_record_range_[0] && x_record_range_[1]) ? 2 : 1;
+
+ XRecordClientSpec client_spec = XRecordAllClients;
+ x_record_context_ = XRecordCreateContext(x_record_display_,
+ 0,
+ &client_spec,
+ 1,
+ record_range_to_use,
+ number_of_ranges);
+ if (!x_record_context_) {
+ LOG(ERROR) << "XRecordCreateContext failed.";
+ return;
+ }
+
+ if (!XRecordEnableContextAsync(x_record_display_,
+ x_record_context_,
+ &UserInputMonitorLinuxCore::ProcessReply,
+ reinterpret_cast<XPointer>(this))) {
+ LOG(ERROR) << "XRecordEnableContextAsync failed.";
+ return;
+ }
+
+ if (!x_record_range_[0] || !x_record_range_[1]) {
+ // Register OnFileCanReadWithoutBlocking() to be called every time there is
+ // something to read from |x_record_display_|.
+ base::MessageLoopForIO* message_loop = base::MessageLoopForIO::current();
+ int result =
+ message_loop->WatchFileDescriptor(ConnectionNumber(x_record_display_),
+ true,
+ base::MessageLoopForIO::WATCH_READ,
+ &controller_,
+ this);
+ if (!result) {
+ LOG(ERROR) << "Failed to create X record task.";
+ return;
+ }
+ }
+
+ // Fetch pending events if any.
+ OnFileCanReadWithoutBlocking(ConnectionNumber(x_record_display_));
+}
+
+void UserInputMonitorLinuxCore::StopMonitor(EventType type) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+
+ if (x_record_range_[type]) {
+ XFree(x_record_range_[type]);
+ x_record_range_[type] = NULL;
+ }
+ if (x_record_range_[0] || x_record_range_[1])
+ return;
+
+ // Context must be disabled via the control channel because we can't send
+ // any X protocol traffic over the data channel while it's recording.
+ if (x_record_context_) {
+ XRecordDisableContext(x_control_display_, x_record_context_);
+ XFlush(x_control_display_);
+ XRecordFreeContext(x_record_display_, x_record_context_);
+ x_record_context_ = 0;
+
+ controller_.StopWatchingFileDescriptor();
+ if (x_record_display_) {
+ XCloseDisplay(x_record_display_);
+ x_record_display_ = NULL;
+ }
+ if (x_control_display_) {
+ XCloseDisplay(x_control_display_);
+ x_control_display_ = NULL;
+ }
+ }
+}
+
+void UserInputMonitorLinuxCore::OnFileCanReadWithoutBlocking(int fd) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ XEvent event;
+ // Fetch pending events if any.
+ while (XPending(x_record_display_)) {
+ XNextEvent(x_record_display_, &event);
+ }
+}
+
+void UserInputMonitorLinuxCore::OnFileCanWriteWithoutBlocking(int fd) {
+ NOTREACHED();
+}
+
+void UserInputMonitorLinuxCore::ProcessXEvent(xEvent* event) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ if (event->u.u.type == MotionNotify) {
+ SkIPoint position(SkIPoint::Make(event->u.keyButtonPointer.rootX,
+ event->u.keyButtonPointer.rootY));
+ mouse_listeners_->Notify(
+ &UserInputMonitor::MouseEventListener::OnMouseMoved, position);
+ } else {
+ ui::EventType type;
+ if (event->u.u.type == KeyPress) {
+ type = ui::ET_KEY_PRESSED;
+ } else if (event->u.u.type == KeyRelease) {
+ type = ui::ET_KEY_RELEASED;
+ } else {
+ NOTREACHED();
+ return;
+ }
+
+ KeySym key_sym =
+ XkbKeycodeToKeysym(x_control_display_, event->u.u.detail, 0, 0);
+ ui::KeyboardCode key_code = ui::KeyboardCodeFromXKeysym(key_sym);
+ counter_.OnKeyboardEvent(type, key_code);
+ }
+}
+
+// static
+void UserInputMonitorLinuxCore::ProcessReply(XPointer self,
+ XRecordInterceptData* data) {
+ if (data->category == XRecordFromServer) {
+ xEvent* event = reinterpret_cast<xEvent*>(data->data);
+ reinterpret_cast<UserInputMonitorLinuxCore*>(self)->ProcessXEvent(event);
+ }
+ XRecordFreeData(data);
+}
+
+//
+// Implementation of UserInputMonitorLinux.
+//
+
+UserInputMonitorLinux::UserInputMonitorLinux(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : io_task_runner_(io_task_runner),
+ core_(new UserInputMonitorLinuxCore(io_task_runner, mouse_listeners())) {}
+
+UserInputMonitorLinux::~UserInputMonitorLinux() {
+ if (!io_task_runner_->DeleteSoon(FROM_HERE, core_))
+ delete core_;
+}
+
+size_t UserInputMonitorLinux::GetKeyPressCount() const {
+ return core_->GetKeyPressCount();
+}
+
+void UserInputMonitorLinux::StartKeyboardMonitoring() {
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorLinuxCore::StartMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorLinuxCore::KEYBOARD_EVENT));
+}
+
+void UserInputMonitorLinux::StopKeyboardMonitoring() {
+ io_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorLinuxCore::StopMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorLinuxCore::KEYBOARD_EVENT));
+}
+
+void UserInputMonitorLinux::StartMouseMonitoring() {
+ io_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&UserInputMonitorLinuxCore::StartMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorLinuxCore::MOUSE_EVENT));
+}
+
+void UserInputMonitorLinux::StopMouseMonitoring() {
+ io_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&UserInputMonitorLinuxCore::StopMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorLinuxCore::MOUSE_EVENT));
+}
+
+} // namespace
+
+scoped_ptr<UserInputMonitor> UserInputMonitor::Create(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner) {
+ return scoped_ptr<UserInputMonitor>(
+ new UserInputMonitorLinux(io_task_runner));
+}
+
+} // namespace media
diff --git a/chromium/media/base/user_input_monitor_mac.cc b/chromium/media/base/user_input_monitor_mac.cc
new file mode 100644
index 00000000000..f70ee4d06a4
--- /dev/null
+++ b/chromium/media/base/user_input_monitor_mac.cc
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/user_input_monitor.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+
+namespace media {
+namespace {
+
+class UserInputMonitorMac : public UserInputMonitor {
+ public:
+ UserInputMonitorMac();
+ virtual ~UserInputMonitorMac();
+
+ virtual size_t GetKeyPressCount() const OVERRIDE;
+
+ private:
+ virtual void StartKeyboardMonitoring() OVERRIDE;
+ virtual void StopKeyboardMonitoring() OVERRIDE;
+ virtual void StartMouseMonitoring() OVERRIDE;
+ virtual void StopMouseMonitoring() OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitorMac);
+};
+
+UserInputMonitorMac::UserInputMonitorMac() {}
+
+UserInputMonitorMac::~UserInputMonitorMac() {}
+
+size_t UserInputMonitorMac::GetKeyPressCount() const {
+ // Use |kCGEventSourceStateHIDSystemState| since we only want to count
+ // hardware generated events.
+ return CGEventSourceCounterForEventType(kCGEventSourceStateHIDSystemState,
+ kCGEventKeyDown);
+}
+
+void UserInputMonitorMac::StartKeyboardMonitoring() {}
+
+void UserInputMonitorMac::StopKeyboardMonitoring() {}
+
+// TODO(jiayl): add the impl.
+void UserInputMonitorMac::StartMouseMonitoring() { NOTIMPLEMENTED(); }
+
+// TODO(jiayl): add the impl.
+void UserInputMonitorMac::StopMouseMonitoring() { NOTIMPLEMENTED(); }
+
+} // namespace
+
+scoped_ptr<UserInputMonitor> UserInputMonitor::Create(
+ const scoped_refptr<base::SingleThreadTaskRunner>& input_task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner) {
+ return scoped_ptr<UserInputMonitor>(new UserInputMonitorMac());
+}
+
+} // namespace media
diff --git a/chromium/media/base/user_input_monitor_unittest.cc b/chromium/media/base/user_input_monitor_unittest.cc
new file mode 100644
index 00000000000..4874a10eb82
--- /dev/null
+++ b/chromium/media/base/user_input_monitor_unittest.cc
@@ -0,0 +1,78 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/base/keyboard_event_counter.h"
+#include "media/base/user_input_monitor.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkPoint.h"
+
+namespace media {
+
+class MockMouseListener : public UserInputMonitor::MouseEventListener {
+ public:
+ MOCK_METHOD1(OnMouseMoved, void(const SkIPoint& position));
+
+ virtual ~MockMouseListener() {}
+};
+
+#if defined(OS_LINUX) || defined(OS_WIN)
+TEST(UserInputMonitorTest, KeyPressCounter) {
+ KeyboardEventCounter counter;
+
+ EXPECT_EQ(0u, counter.GetKeyPressCount());
+
+ counter.OnKeyboardEvent(ui::ET_KEY_PRESSED, ui::VKEY_0);
+ EXPECT_EQ(1u, counter.GetKeyPressCount());
+
+ // Holding the same key without releasing it does not increase the count.
+ counter.OnKeyboardEvent(ui::ET_KEY_PRESSED, ui::VKEY_0);
+ EXPECT_EQ(1u, counter.GetKeyPressCount());
+
+ // Releasing the key does not affect the total count.
+ counter.OnKeyboardEvent(ui::ET_KEY_RELEASED, ui::VKEY_0);
+ EXPECT_EQ(1u, counter.GetKeyPressCount());
+
+ counter.OnKeyboardEvent(ui::ET_KEY_PRESSED, ui::VKEY_0);
+ counter.OnKeyboardEvent(ui::ET_KEY_RELEASED, ui::VKEY_0);
+ EXPECT_EQ(2u, counter.GetKeyPressCount());
+}
+#endif // defined(OS_LINUX) || defined(OS_WIN)
+
+TEST(UserInputMonitorTest, CreatePlatformSpecific) {
+#if defined(OS_LINUX)
+ base::MessageLoop message_loop(base::MessageLoop::TYPE_IO);
+#else
+ base::MessageLoop message_loop(base::MessageLoop::TYPE_UI);
+#endif // defined(OS_LINUX)
+
+ base::RunLoop run_loop;
+ scoped_ptr<UserInputMonitor> monitor = UserInputMonitor::Create(
+ message_loop.message_loop_proxy(), message_loop.message_loop_proxy());
+
+ if (!monitor)
+ return;
+
+ MockMouseListener listener;
+ // Ignore any callbacks.
+ EXPECT_CALL(listener, OnMouseMoved(testing::_)).Times(testing::AnyNumber());
+
+#if !defined(OS_MACOSX)
+ monitor->AddMouseListener(&listener);
+ monitor->RemoveMouseListener(&listener);
+#endif // !define(OS_MACOSX)
+
+ monitor->EnableKeyPressMonitoring();
+ monitor->DisableKeyPressMonitoring();
+
+ monitor.reset();
+ run_loop.RunUntilIdle();
+}
+
+} // namespace media
diff --git a/chromium/media/base/user_input_monitor_win.cc b/chromium/media/base/user_input_monitor_win.cc
new file mode 100644
index 00000000000..13b826f01eb
--- /dev/null
+++ b/chromium/media/base/user_input_monitor_win.cc
@@ -0,0 +1,298 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/user_input_monitor.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/win/message_window.h"
+#include "media/base/keyboard_event_counter.h"
+#include "third_party/skia/include/core/SkPoint.h"
+#include "ui/events/keycodes/keyboard_code_conversion_win.h"
+
+namespace media {
+namespace {
+
+// From the HID Usage Tables specification.
+const USHORT kGenericDesktopPage = 1;
+const USHORT kMouseUsage = 2;
+const USHORT kKeyboardUsage = 6;
+
+// This is the actual implementation of event monitoring. It's separated from
+// UserInputMonitorWin since it needs to be deleted on the UI thread.
+class UserInputMonitorWinCore
+ : public base::SupportsWeakPtr<UserInputMonitorWinCore> {
+ public:
+ enum EventBitMask {
+ MOUSE_EVENT_MASK = 1,
+ KEYBOARD_EVENT_MASK = 2,
+ };
+
+ explicit UserInputMonitorWinCore(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const scoped_refptr<UserInputMonitor::MouseListenerList>&
+ mouse_listeners);
+ ~UserInputMonitorWinCore();
+
+ size_t GetKeyPressCount() const;
+ void StartMonitor(EventBitMask type);
+ void StopMonitor(EventBitMask type);
+
+ private:
+ // Handles WM_INPUT messages.
+ LRESULT OnInput(HRAWINPUT input_handle);
+ // Handles messages received by |window_|.
+ bool HandleMessage(UINT message,
+ WPARAM wparam,
+ LPARAM lparam,
+ LRESULT* result);
+ RAWINPUTDEVICE* GetRawInputDevices(EventBitMask event, DWORD flags);
+
+ // Task runner on which |window_| is created.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ scoped_refptr<ObserverListThreadSafe<UserInputMonitor::MouseEventListener> >
+ mouse_listeners_;
+
+ // These members are only accessed on the UI thread.
+ scoped_ptr<base::win::MessageWindow> window_;
+ uint8 events_monitored_;
+ KeyboardEventCounter counter_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitorWinCore);
+};
+
+class UserInputMonitorWin : public UserInputMonitor {
+ public:
+ explicit UserInputMonitorWin(
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner);
+ virtual ~UserInputMonitorWin();
+
+ // Public UserInputMonitor overrides.
+ virtual size_t GetKeyPressCount() const OVERRIDE;
+
+ private:
+ // Private UserInputMonitor overrides.
+ virtual void StartKeyboardMonitoring() OVERRIDE;
+ virtual void StopKeyboardMonitoring() OVERRIDE;
+ virtual void StartMouseMonitoring() OVERRIDE;
+ virtual void StopMouseMonitoring() OVERRIDE;
+
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ UserInputMonitorWinCore* core_;
+
+ DISALLOW_COPY_AND_ASSIGN(UserInputMonitorWin);
+};
+
+UserInputMonitorWinCore::UserInputMonitorWinCore(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const scoped_refptr<UserInputMonitor::MouseListenerList>& mouse_listeners)
+ : ui_task_runner_(ui_task_runner),
+ mouse_listeners_(mouse_listeners),
+ events_monitored_(0) {}
+
+UserInputMonitorWinCore::~UserInputMonitorWinCore() {
+ DCHECK(!window_);
+ DCHECK(!events_monitored_);
+}
+
+size_t UserInputMonitorWinCore::GetKeyPressCount() const {
+ return counter_.GetKeyPressCount();
+}
+
+void UserInputMonitorWinCore::StartMonitor(EventBitMask type) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ if (events_monitored_ & type)
+ return;
+
+ if (type == KEYBOARD_EVENT_MASK)
+ counter_.Reset();
+
+ if (!window_) {
+ window_.reset(new base::win::MessageWindow());
+ if (!window_->Create(base::Bind(&UserInputMonitorWinCore::HandleMessage,
+ base::Unretained(this)))) {
+ LOG_GETLASTERROR(ERROR) << "Failed to create the raw input window";
+ window_.reset();
+ return;
+ }
+ }
+
+ // Register to receive raw mouse and/or keyboard input.
+ scoped_ptr<RAWINPUTDEVICE> device(GetRawInputDevices(type, RIDEV_INPUTSINK));
+ if (!RegisterRawInputDevices(device.get(), 1, sizeof(*device))) {
+ LOG_GETLASTERROR(ERROR)
+ << "RegisterRawInputDevices() failed for RIDEV_INPUTSINK";
+ return;
+ }
+ events_monitored_ |= type;
+}
+
+void UserInputMonitorWinCore::StopMonitor(EventBitMask type) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ if (!(events_monitored_ & type))
+ return;
+
+ // Stop receiving raw input.
+ DCHECK(window_);
+ scoped_ptr<RAWINPUTDEVICE> device(GetRawInputDevices(type, RIDEV_REMOVE));
+
+ if (!RegisterRawInputDevices(device.get(), 1, sizeof(*device))) {
+ LOG_GETLASTERROR(INFO)
+ << "RegisterRawInputDevices() failed for RIDEV_REMOVE";
+ }
+
+ events_monitored_ &= ~type;
+ if (events_monitored_ == 0)
+ window_.reset();
+}
+
+LRESULT UserInputMonitorWinCore::OnInput(HRAWINPUT input_handle) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ // Get the size of the input record.
+ UINT size = 0;
+ UINT result = GetRawInputData(
+ input_handle, RID_INPUT, NULL, &size, sizeof(RAWINPUTHEADER));
+ if (result == -1) {
+ LOG_GETLASTERROR(ERROR) << "GetRawInputData() failed";
+ return 0;
+ }
+ DCHECK_EQ(0u, result);
+
+ // Retrieve the input record itself.
+ scoped_ptr<uint8[]> buffer(new uint8[size]);
+ RAWINPUT* input = reinterpret_cast<RAWINPUT*>(buffer.get());
+ result = GetRawInputData(
+ input_handle, RID_INPUT, buffer.get(), &size, sizeof(RAWINPUTHEADER));
+ if (result == -1) {
+ LOG_GETLASTERROR(ERROR) << "GetRawInputData() failed";
+ return 0;
+ }
+ DCHECK_EQ(size, result);
+
+ // Notify the observer about events generated locally.
+ if (input->header.dwType == RIM_TYPEMOUSE && input->header.hDevice != NULL) {
+ POINT position;
+ if (!GetCursorPos(&position)) {
+ position.x = 0;
+ position.y = 0;
+ }
+ mouse_listeners_->Notify(
+ &UserInputMonitor::MouseEventListener::OnMouseMoved,
+ SkIPoint::Make(position.x, position.y));
+ } else if (input->header.dwType == RIM_TYPEKEYBOARD &&
+ input->header.hDevice != NULL) {
+ ui::EventType event = (input->data.keyboard.Flags & RI_KEY_BREAK)
+ ? ui::ET_KEY_RELEASED
+ : ui::ET_KEY_PRESSED;
+ ui::KeyboardCode key_code =
+ ui::KeyboardCodeForWindowsKeyCode(input->data.keyboard.VKey);
+ counter_.OnKeyboardEvent(event, key_code);
+ }
+
+ return DefRawInputProc(&input, 1, sizeof(RAWINPUTHEADER));
+}
+
+bool UserInputMonitorWinCore::HandleMessage(UINT message,
+ WPARAM wparam,
+ LPARAM lparam,
+ LRESULT* result) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ switch (message) {
+ case WM_INPUT:
+ *result = OnInput(reinterpret_cast<HRAWINPUT>(lparam));
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+RAWINPUTDEVICE* UserInputMonitorWinCore::GetRawInputDevices(EventBitMask event,
+ DWORD flags) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ scoped_ptr<RAWINPUTDEVICE> device(new RAWINPUTDEVICE());
+ if (event == MOUSE_EVENT_MASK) {
+ device->dwFlags = flags;
+ device->usUsagePage = kGenericDesktopPage;
+ device->usUsage = kMouseUsage;
+ device->hwndTarget = window_->hwnd();
+ } else {
+ DCHECK_EQ(KEYBOARD_EVENT_MASK, event);
+ device->dwFlags = flags;
+ device->usUsagePage = kGenericDesktopPage;
+ device->usUsage = kKeyboardUsage;
+ device->hwndTarget = window_->hwnd();
+ }
+ return device.release();
+}
+
+//
+// Implementation of UserInputMonitorWin.
+//
+
+UserInputMonitorWin::UserInputMonitorWin(
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner)
+ : ui_task_runner_(ui_task_runner),
+ core_(new UserInputMonitorWinCore(ui_task_runner, mouse_listeners())) {}
+
+UserInputMonitorWin::~UserInputMonitorWin() {
+ if (!ui_task_runner_->DeleteSoon(FROM_HERE, core_))
+ delete core_;
+}
+
+size_t UserInputMonitorWin::GetKeyPressCount() const {
+ return core_->GetKeyPressCount();
+}
+
+void UserInputMonitorWin::StartKeyboardMonitoring() {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorWinCore::StartMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorWinCore::KEYBOARD_EVENT_MASK));
+}
+
+void UserInputMonitorWin::StopKeyboardMonitoring() {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorWinCore::StopMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorWinCore::KEYBOARD_EVENT_MASK));
+}
+
+void UserInputMonitorWin::StartMouseMonitoring() {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorWinCore::StartMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorWinCore::MOUSE_EVENT_MASK));
+}
+
+void UserInputMonitorWin::StopMouseMonitoring() {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&UserInputMonitorWinCore::StopMonitor,
+ core_->AsWeakPtr(),
+ UserInputMonitorWinCore::MOUSE_EVENT_MASK));
+}
+
+} // namespace
+
+scoped_ptr<UserInputMonitor> UserInputMonitor::Create(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& ui_task_runner) {
+ return scoped_ptr<UserInputMonitor>(new UserInputMonitorWin(ui_task_runner));
+}
+
+} // namespace media
diff --git a/chromium/media/base/vector_math.cc b/chromium/media/base/vector_math.cc
index ac6de92ad8d..de946ca8cbf 100644
--- a/chromium/media/base/vector_math.cc
+++ b/chromium/media/base/vector_math.cc
@@ -18,9 +18,8 @@ namespace vector_math {
// If we know the minimum architecture at compile time, avoid CPU detection.
// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial. iOS lies about
-// its architecture, so we also need to exclude it here.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+// methods and plumbing the -msse built library is non-trivial.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
#if defined(__SSE__)
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index 08e7e1ad7a2..a372889cb55 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -124,9 +124,13 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
uint8* data,
+ size_t data_size,
base::SharedMemoryHandle handle,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
+ if (data_size < AllocationSize(format, coded_size))
+ return NULL;
+
switch (format) {
case I420: {
scoped_refptr<VideoFrame> frame(new VideoFrame(
@@ -249,6 +253,39 @@ static inline size_t RoundUp(size_t value, size_t alignment) {
return ((value + (alignment - 1)) & ~(alignment-1));
}
+// static
+size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
+ switch (format) {
+ case VideoFrame::RGB32:
+ return coded_size.GetArea() * 4;
+ case VideoFrame::YV12:
+ case VideoFrame::I420: {
+ const size_t rounded_size =
+ RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
+ return rounded_size * 3 / 2;
+ }
+ case VideoFrame::YV12A: {
+ const size_t rounded_size =
+ RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
+ return rounded_size * 5 / 2;
+ }
+ case VideoFrame::YV16: {
+ const size_t rounded_size =
+ RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
+ return rounded_size * 2;
+ }
+ case VideoFrame::INVALID:
+ case VideoFrame::EMPTY:
+ case VideoFrame::NATIVE_TEXTURE:
+#if defined(GOOGLE_TV)
+ case VideoFrame::HOLE:
+#endif
+ break;
+ }
+ NOTREACHED() << "Unsupported video frame format: " << format;
+ return 0;
+}
+
// Release data allocated by AllocateRGB() or AllocateYUV().
static void ReleaseData(uint8* data) {
DCHECK(data);
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index 82a08a986de..df383d0d798 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -148,6 +148,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
uint8* data,
+ size_t data_size,
base::SharedMemoryHandle handle,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb);
@@ -192,6 +193,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
static size_t NumPlanes(Format format);
+ // Returns the required allocation size for a (tightly packed) frame of the
+ // given coded size and format.
+ static size_t AllocationSize(Format format, const gfx::Size& coded_size);
+
Format format() const { return format_; }
const gfx::Size& coded_size() const { return coded_size_; }
diff --git a/chromium/media/cast/DEPS b/chromium/media/cast/DEPS
new file mode 100644
index 00000000000..8e10c67d316
--- /dev/null
+++ b/chromium/media/cast/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+net",
+ "+third_party/webrtc",
+]
diff --git a/chromium/media/cast/OWNERS b/chromium/media/cast/OWNERS
new file mode 100644
index 00000000000..22e814b0a70
--- /dev/null
+++ b/chromium/media/cast/OWNERS
@@ -0,0 +1,2 @@
+hclam@chromium.org
+hubbe@chromium.org
diff --git a/chromium/media/cast/README b/chromium/media/cast/README
new file mode 100644
index 00000000000..4878967fd5c
--- /dev/null
+++ b/chromium/media/cast/README
@@ -0,0 +1,64 @@
+This directory contains a RTP/RTCP library used for the Cast mirroring
+protocol. This library is specifically built for low latency purposes and
+enables Chrome to send real-time video and audio streams.
+
+CONTENTS
+
+cast/
+ Build rules and top level source files and headers.
+
+cast/audio_receiver/
+ Module for receiving and decoding an audio RTP stream.
+
+cast/audio_sender/
+ Module for encoding and sending audio RTP stream.
+
+cast/congestion_control/
+ Bandwidth estimation and network congestion handling.
+
+cast/pacing/
+ Module for rate limiting data outflow.
+
+cast/rtcp/
+ Module for handling RTCP messages.
+
+cast/rtp_common/
+ Module for common code used for RTP messages.
+
+cast/rtp_receiver/
+ Module for receiving RTP messages.
+
+cast/rtp_sender/
+ Module for sending RTP messages.
+
+cast/test/
+ Module for test applications.
+
+cast/video_receiver/
+ Module for receiving and decoding a video RTP stream.
+
+cast/video_sender/
+ Module for encoding and sending video RTP stream.
+
+DEPENDENCIES
+
+Content of this directory should only depend on:
+
+base/
+ Provides base libraries and platform independent layer.
+
+net/
+ Provides network capabilities.
+
+third_party/libvpx
+ Provides video encoder.
+
+third_party/opus
+ Provides audio encoder.
+
+third_party/webrtc
+ Provides audio signal processing.
+
+OWNERS
+
+See OWNERS for ownership.
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.cc b/chromium/media/cast/audio_receiver/audio_decoder.cc
new file mode 100644
index 00000000000..266c04ea20d
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_decoder.cc
@@ -0,0 +1,99 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/cast/audio_receiver/audio_decoder.h"
+
+#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "third_party/webrtc/modules/interface/module_common_types.h"
+
+namespace media {
+namespace cast {
+
+AudioDecoder::AudioDecoder(scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config)
+ : cast_thread_(cast_thread),
+ have_received_packets_(false) {
+ audio_decoder_ = webrtc::AudioCodingModule::Create(0);
+ audio_decoder_->InitializeReceiver();
+
+ webrtc::CodecInst receive_codec;
+ switch (audio_config.codec) {
+ case kPcm16:
+ receive_codec.pltype = audio_config.rtp_payload_type;
+ strncpy(receive_codec.plname, "L16", 4);
+ receive_codec.plfreq = audio_config.frequency;
+ receive_codec.pacsize = -1;
+ receive_codec.channels = audio_config.channels;
+ receive_codec.rate = -1;
+ break;
+ case kOpus:
+ receive_codec.pltype = audio_config.rtp_payload_type;
+ strncpy(receive_codec.plname, "opus", 5);
+ receive_codec.plfreq = audio_config.frequency;
+ receive_codec.pacsize = -1;
+ receive_codec.channels = audio_config.channels;
+ receive_codec.rate = -1;
+ break;
+ case kExternalAudio:
+ DCHECK(false) << "Codec must be specified for audio decoder";
+ break;
+ }
+ if (audio_decoder_->RegisterReceiveCodec(receive_codec) != 0) {
+ DCHECK(false) << "Failed to register receive codec";
+ }
+
+ audio_decoder_->SetMaximumPlayoutDelay(audio_config.rtp_max_delay_ms);
+ audio_decoder_->SetPlayoutMode(webrtc::streaming);
+}
+
+AudioDecoder::~AudioDecoder() {
+ webrtc::AudioCodingModule::Destroy(audio_decoder_);
+}
+
+bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
+ int desired_frequency,
+ PcmAudioFrame* audio_frame,
+ uint32* rtp_timestamp) {
+ if (!have_received_packets_) return false;
+
+ for (int i = 0; i < number_of_10ms_blocks; ++i) {
+ webrtc::AudioFrame webrtc_audio_frame;
+ if (0 != audio_decoder_->PlayoutData10Ms(desired_frequency,
+ &webrtc_audio_frame)) {
+ return false;
+ }
+ if (webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kPLCCNG ||
+ webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kUndefined) {
+ // We are only interested in real decoded audio.
+ return false;
+ }
+ audio_frame->frequency = webrtc_audio_frame.sample_rate_hz_;
+ audio_frame->channels = webrtc_audio_frame.num_channels_;
+
+ if (i == 0) {
+ // Use the timestamp from the first 10ms block.
+ if (0 != audio_decoder_->PlayoutTimestamp(rtp_timestamp)) {
+ return false;
+ }
+ }
+ int samples_per_10ms = webrtc_audio_frame.samples_per_channel_;
+
+ audio_frame->samples.insert(
+ audio_frame->samples.end(),
+ &webrtc_audio_frame.data_[0],
+ &webrtc_audio_frame.data_[samples_per_10ms * audio_frame->channels]);
+ }
+ return true;
+}
+
+void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header) {
+ have_received_packets_ = true;
+ audio_decoder_->IncomingPacket(payload_data, payload_size, rtp_header.webrtc);
+}
+
+} // namespace cast
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.h b/chromium/media/cast/audio_receiver/audio_decoder.h
new file mode 100644
index 00000000000..2f5f13aea2d
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_decoder.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
+#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace webrtc {
+class AudioCodingModule;
+}
+
+namespace media {
+namespace cast {
+
+// Thread safe class.
+// It should be called from the main cast thread; however that is not required.
+class AudioDecoder : public base::RefCountedThreadSafe<AudioDecoder> {
+ public:
+ explicit AudioDecoder(scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config);
+
+ virtual ~AudioDecoder();
+
+ // Extract a raw audio frame from the decoder.
+ // Set the number of desired 10ms blocks and frequency.
+ bool GetRawAudioFrame(int number_of_10ms_blocks,
+ int desired_frequency,
+ PcmAudioFrame* audio_frame,
+ uint32* rtp_timestamp);
+
+ // Insert an RTP packet to the decoder.
+ void IncomingParsedRtpPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header);
+
+ private:
+ // Can't use scoped_ptr due to protected constructor within webrtc.
+ webrtc::AudioCodingModule* audio_decoder_;
+ bool have_received_packets_;
+ scoped_refptr<CastThread> cast_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_ \ No newline at end of file
diff --git a/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc b/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
new file mode 100644
index 00000000000..dbe3e324df0
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/audio_receiver/audio_decoder.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+class AudioDecoderTest : public ::testing::Test {
+ protected:
+ AudioDecoderTest() {}
+
+ ~AudioDecoderTest() {}
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ }
+ void Configure(const AudioReceiverConfig& audio_config) {
+ audio_decoder_ = new AudioDecoder(cast_thread_, audio_config);
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<AudioDecoder> audio_decoder_;
+};
+
+TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
+ AudioReceiverConfig audio_config;
+ audio_config.rtp_payload_type = 127;
+ audio_config.frequency = 16000;
+ audio_config.channels = 1;
+ audio_config.codec = kPcm16;
+ audio_config.use_external_decoder = false;
+ Configure(audio_config);
+
+ RtpCastHeader rtp_header;
+ rtp_header.webrtc.header.payloadType = 127;
+ rtp_header.webrtc.header.sequenceNumber = 1234;
+ rtp_header.webrtc.header.timestamp = 0x87654321;
+ rtp_header.webrtc.header.ssrc = 0x12345678;
+ rtp_header.webrtc.header.paddingLength = 0;
+ rtp_header.webrtc.header.headerLength = 12;
+ rtp_header.webrtc.type.Audio.channel = 1;
+ rtp_header.webrtc.type.Audio.isCNG = false;
+
+ std::vector<int16> payload(640, 0x1234);
+
+ uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
+ int payload_size = payload.size() * sizeof(int16);
+
+ audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
+ rtp_header);
+
+ int number_of_10ms_blocks = 4;
+ int desired_frequency = 16000;
+ PcmAudioFrame audio_frame;
+ uint32 rtp_timestamp;
+
+ EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
+
+ EXPECT_EQ(1, audio_frame.channels);
+ EXPECT_EQ(16000, audio_frame.frequency);
+ EXPECT_EQ(640ul, audio_frame.samples.size());
+ // First 10 samples per channel are 0 from NetEq.
+ for (size_t i = 10; i < audio_frame.samples.size(); ++i) {
+ EXPECT_EQ(0x3412, audio_frame.samples[i]);
+ }
+ task_runner_->RunTasks();
+}
+
+TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
+ AudioReceiverConfig audio_config;
+ audio_config.rtp_payload_type = 127;
+ audio_config.frequency = 16000;
+ audio_config.channels = 2;
+ audio_config.codec = kPcm16;
+ audio_config.use_external_decoder = false;
+ Configure(audio_config);
+
+ RtpCastHeader rtp_header;
+ rtp_header.webrtc.header.payloadType = 127;
+ rtp_header.webrtc.header.sequenceNumber = 1234;
+ rtp_header.webrtc.header.timestamp = 0x87654321;
+ rtp_header.webrtc.header.ssrc = 0x12345678;
+ rtp_header.webrtc.header.paddingLength = 0;
+ rtp_header.webrtc.header.headerLength = 12;
+
+ rtp_header.webrtc.type.Audio.isCNG = false;
+ rtp_header.webrtc.type.Audio.channel = 2;
+
+ std::vector<int16> payload(640, 0x1234);
+
+ uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
+ int payload_size = payload.size() * sizeof(int16);
+
+ audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
+ rtp_header);
+
+
+ int number_of_10ms_blocks = 2;
+ int desired_frequency = 16000;
+ PcmAudioFrame audio_frame;
+ uint32 rtp_timestamp;
+
+ EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
+
+ EXPECT_EQ(2, audio_frame.channels);
+ EXPECT_EQ(16000, audio_frame.frequency);
+ EXPECT_EQ(640ul, audio_frame.samples.size());
+ for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
+ ++i) {
+ EXPECT_EQ(0x3412, audio_frame.samples[i]);
+ }
+
+ rtp_header.webrtc.header.sequenceNumber++;
+ rtp_header.webrtc.header.timestamp += (audio_config.frequency / 100) * 2 * 2;
+ audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
+ rtp_header);
+
+ EXPECT_EQ(2, audio_frame.channels);
+ EXPECT_EQ(16000, audio_frame.frequency);
+ EXPECT_EQ(640ul, audio_frame.samples.size());
+ // First 10 samples per channel are 0 from NetEq.
+ for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
+ ++i) {
+ EXPECT_EQ(0x3412, audio_frame.samples[i]);
+ }
+ task_runner_->RunTasks();
+}
+
+TEST_F(AudioDecoderTest, Pcm16Resample) {
+ AudioReceiverConfig audio_config;
+ audio_config.rtp_payload_type = 127;
+ audio_config.frequency = 16000;
+ audio_config.channels = 2;
+ audio_config.codec = kPcm16;
+ audio_config.use_external_decoder = false;
+ Configure(audio_config);
+
+ RtpCastHeader rtp_header;
+ rtp_header.webrtc.header.payloadType = 127;
+ rtp_header.webrtc.header.sequenceNumber = 1234;
+ rtp_header.webrtc.header.timestamp = 0x87654321;
+ rtp_header.webrtc.header.ssrc = 0x12345678;
+ rtp_header.webrtc.header.paddingLength = 0;
+ rtp_header.webrtc.header.headerLength = 12;
+
+ rtp_header.webrtc.type.Audio.isCNG = false;
+ rtp_header.webrtc.type.Audio.channel = 2;
+
+ std::vector<int16> payload(640, 0x1234);
+
+ uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
+ int payload_size = payload.size() * sizeof(int16);
+
+ audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
+ rtp_header);
+
+ int number_of_10ms_blocks = 2;
+ int desired_frequency = 48000;
+ PcmAudioFrame audio_frame;
+ uint32 rtp_timestamp;
+
+ EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
+
+ EXPECT_EQ(2, audio_frame.channels);
+ EXPECT_EQ(48000, audio_frame.frequency);
+ EXPECT_EQ(1920ul, audio_frame.samples.size()); // Upsampled to 48 KHz.
+ int count = 0;
+ // Resampling makes the variance worse.
+ for (size_t i = 100 * audio_config.channels; i < audio_frame.samples.size();
+ ++i) {
+ EXPECT_NEAR(0x3412, audio_frame.samples[i], 400);
+ if (0x3412 == audio_frame.samples[i]) count++;
+ }
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.cc b/chromium/media/cast/audio_receiver/audio_receiver.cc
new file mode 100644
index 00000000000..cf8a8b8b1da
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_receiver.cc
@@ -0,0 +1,304 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/audio_receiver/audio_receiver.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/audio_receiver/audio_decoder.h"
+#include "media/cast/framer/framer.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "third_party/webrtc/modules/interface/module_common_types.h"
+#include "third_party/webrtc/system_wrappers/interface/sleep.h"
+#include "third_party/webrtc/system_wrappers/interface/tick_util.h"
+
+static const int64 kMaxFrameWaitMs = 20;
+const int64 kMinSchedulingDelayMs = 1;
+
+namespace media {
+namespace cast {
+
+
+// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
+// Used to pass payload data into the audio receiver.
+class LocalRtpAudioData : public RtpData {
+ public:
+ explicit LocalRtpAudioData(AudioReceiver* audio_receiver)
+ : audio_receiver_(audio_receiver),
+ time_first_incoming_packet_(),
+ first_incoming_rtp_timestamp_(0),
+ default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()) {}
+
+ virtual void OnReceivedPayloadData(
+ const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader* rtp_header) OVERRIDE {
+ if (time_first_incoming_packet_.is_null()) {
+ first_incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
+ time_first_incoming_packet_ = clock_->NowTicks();
+ }
+ audio_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
+ *rtp_header);
+ }
+
+ void GetFirstPacketInformation(base::TimeTicks* time_incoming_packet,
+ uint32* incoming_rtp_timestamp) {
+ *time_incoming_packet = time_first_incoming_packet_;
+ *incoming_rtp_timestamp = first_incoming_rtp_timestamp_;
+ }
+
+ private:
+ AudioReceiver* audio_receiver_;
+ base::TimeTicks time_first_incoming_packet_;
+ uint32 first_incoming_rtp_timestamp_;
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;
+};
+
+// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
+// Used to convey cast-specific feedback from receiver to sender.
+class LocalRtpAudioFeedback : public RtpPayloadFeedback {
+ public:
+ explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver)
+ : audio_receiver_(audio_receiver) {
+ }
+
+ virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
+ audio_receiver_->CastFeedback(cast_message);
+ }
+
+ virtual void RequestKeyFrame() OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ private:
+ AudioReceiver* audio_receiver_;
+};
+
+class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
+ public:
+ explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver)
+ : rtp_receiver_(rtp_receiver) {
+ }
+
+ virtual void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter) OVERRIDE {
+ rtp_receiver_->GetStatistics(fraction_lost,
+ cumulative_lost,
+ extended_high_sequence_number,
+ jitter);
+ }
+
+ private:
+ RtpReceiver* rtp_receiver_;
+};
+
+
+AudioReceiver::AudioReceiver(scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config,
+ PacedPacketSender* const packet_sender)
+ : cast_thread_(cast_thread),
+ codec_(audio_config.codec),
+ incoming_ssrc_(audio_config.incoming_ssrc),
+ frequency_(audio_config.frequency),
+ audio_buffer_(),
+ audio_decoder_(),
+ time_offset_(),
+ default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()),
+ weak_factory_(this) {
+ target_delay_delta_ =
+ base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms);
+ incoming_payload_callback_.reset(new LocalRtpAudioData(this));
+ incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this));
+ if (audio_config.use_external_decoder) {
+ audio_buffer_.reset(new Framer(incoming_payload_feedback_.get(),
+ audio_config.incoming_ssrc,
+ true,
+ 0));
+ } else {
+ audio_decoder_ = new AudioDecoder(cast_thread_, audio_config);
+ }
+ rtp_receiver_.reset(new RtpReceiver(&audio_config,
+ NULL,
+ incoming_payload_callback_.get()));
+ rtp_audio_receiver_statistics_.reset(
+ new LocalRtpReceiverStatistics(rtp_receiver_.get()));
+ base::TimeDelta rtcp_interval_delta =
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
+ rtcp_.reset(new Rtcp(NULL,
+ packet_sender,
+ NULL,
+ rtp_audio_receiver_statistics_.get(),
+ audio_config.rtcp_mode,
+ rtcp_interval_delta,
+ false,
+ audio_config.feedback_ssrc,
+ audio_config.rtcp_c_name));
+ rtcp_->SetRemoteSSRC(audio_config.incoming_ssrc);
+ ScheduleNextRtcpReport();
+}
+
+AudioReceiver::~AudioReceiver() {}
+
+void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header) {
+ if (audio_decoder_) {
+ DCHECK(!audio_buffer_) << "Invalid internal state";
+ audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
+ rtp_header);
+ return;
+ }
+ if (audio_buffer_) {
+ DCHECK(!audio_decoder_) << "Invalid internal state";
+ audio_buffer_->InsertPacket(payload_data, payload_size, rtp_header);
+ }
+}
+
+void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback callback) {
+ DCHECK(audio_decoder_) << "Invalid function call in this configuration";
+
+ cast_thread_->PostTask(CastThread::AUDIO_DECODER, FROM_HERE, base::Bind(
+ &AudioReceiver::DecodeAudioFrameThread, weak_factory_.GetWeakPtr(),
+ number_of_10ms_blocks, desired_frequency, callback));
+}
+
+void AudioReceiver::DecodeAudioFrameThread(
+ int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback callback) {
+ // TODO(mikhal): Allow the application to allocate this memory.
+ scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame());
+
+ uint32 rtp_timestamp = 0;
+ if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ audio_frame.get(),
+ &rtp_timestamp)) {
+ return;
+ }
+ base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks playout_time;
+ playout_time = GetPlayoutTime(now, rtp_timestamp);
+
+ // Frame is ready - Send back to the main thread.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(callback,
+ base::Passed(&audio_frame), playout_time));
+}
+
+bool AudioReceiver::GetEncodedAudioFrame(EncodedAudioFrame* encoded_frame,
+ base::TimeTicks* playout_time) {
+ DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+
+ uint32 rtp_timestamp = 0;
+ bool next_frame = false;
+ base::TimeTicks timeout = clock_->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+ if (!audio_buffer_->GetEncodedAudioFrame(timeout, encoded_frame,
+ &rtp_timestamp, &next_frame)) {
+ return false;
+ }
+ base::TimeTicks now = clock_->NowTicks();
+ *playout_time = GetPlayoutTime(now, rtp_timestamp);
+
+ base::TimeDelta time_until_playout = now - *playout_time;
+ base::TimeDelta time_until_release = time_until_playout -
+ base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+ base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
+ if (!next_frame && (time_until_release > zero_delta)) {
+ // Relying on the application to keep polling.
+ return false;
+ }
+ encoded_frame->codec = codec_;
+ return true;
+}
+
+void AudioReceiver::ReleaseFrame(uint8 frame_id) {
+ audio_buffer_->ReleaseFrame(frame_id);
+}
+
+void AudioReceiver::IncomingPacket(const uint8* packet, int length) {
+ bool rtcp_packet = Rtcp::IsRtcpPacket(packet, length);
+ if (!rtcp_packet) {
+ rtp_receiver_->ReceivedPacket(packet, length);
+ } else {
+ rtcp_->IncomingRtcpPacket(packet, length);
+ }
+}
+
+void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
+ rtcp_->SendRtcpCast(cast_message);
+}
+
+base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
+ uint32 rtp_timestamp) {
+ // Sender's time in ms when this frame was recorded.
+ // Note: the senders clock and our local clock might not be synced.
+ base::TimeTicks rtp_timestamp_in_ticks;
+ base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
+ if (time_offset_ == zero_delta) {
+ base::TimeTicks time_first_incoming_packet;
+ uint32 first_incoming_rtp_timestamp;
+
+ incoming_payload_callback_->GetFirstPacketInformation(
+ &time_first_incoming_packet, &first_incoming_rtp_timestamp);
+
+ if (rtcp_->RtpTimestampInSenderTime(frequency_,
+ first_incoming_rtp_timestamp,
+ &rtp_timestamp_in_ticks)) {
+ time_offset_ = time_first_incoming_packet - rtp_timestamp_in_ticks;
+ } else {
+ // We have not received any RTCP packets to sync the stream; play it
+ // out as soon as possible.
+ uint32 rtp_timestamp_diff =
+ rtp_timestamp - first_incoming_rtp_timestamp;
+
+ int frequency_khz = frequency_ / 1000;
+ base::TimeDelta rtp_time_diff_delta =
+ base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz);
+ base::TimeDelta time_diff_delta = now - time_first_incoming_packet;
+ if (rtp_time_diff_delta > time_diff_delta) {
+ return (now + (rtp_time_diff_delta - time_diff_delta));
+ } else {
+ return now;
+ }
+ }
+ }
+ // This can fail if we have not received any RTCP packets in a long time.
+ if (rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
+ &rtp_timestamp_in_ticks)) {
+ return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
+ } else {
+ return now;
+ }
+}
+
+void AudioReceiver::ScheduleNextRtcpReport() {
+ base::TimeDelta time_to_send =
+ rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+ time_to_send = std::max(time_to_send,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::SendNextRtcpReport,
+ weak_factory_.GetWeakPtr()), time_to_send);
+}
+
+void AudioReceiver::SendNextRtcpReport() {
+ rtcp_->SendRtcpReport(incoming_ssrc_);
+ ScheduleNextRtcpReport();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.gypi b/chromium/media/cast/audio_receiver/audio_receiver.gypi
new file mode 100644
index 00000000000..240f742b899
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_receiver.gypi
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_audio_receiver',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
+ 'sources': [
+ 'audio_decoder.h',
+ 'audio_decoder.cc',
+ 'audio_receiver.h',
+ 'audio_receiver.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
+ '<(DEPTH)/media/cast/rtp_receiver/rtp_receiver.gyp:*',
+ '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ ],
+ },
+ ],
+} \ No newline at end of file
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.h b/chromium/media/cast/audio_receiver/audio_receiver.h
new file mode 100644
index 00000000000..9a1f138efc6
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_receiver.h
@@ -0,0 +1,117 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
+#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
+#include "media/cast/rtp_common/rtp_defines.h" // RtpCastHeader
+
+namespace media {
+namespace cast {
+
+class AudioDecoder;
+class Framer;
+class LocalRtpAudioData;
+class LocalRtpAudioFeedback;
+class PacedPacketSender;
+class RtpReceiver;
+class RtpReceiverStatistics;
+
+// This class is not thread safe. Should only be called from the Main cast
+// thread.
+class AudioReceiver : public base::NonThreadSafe,
+ public base::SupportsWeakPtr<AudioReceiver> {
+ public:
+ AudioReceiver(scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config,
+ PacedPacketSender* const packet_sender);
+
+ virtual ~AudioReceiver();
+
+ // Extract a raw audio frame from the cast receiver.
+ // Actual decoding will be performed on a designated audio_decoder thread.
+ void GetRawAudioFrame(int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback callback);
+
+ // Extract an encoded audio frame from the cast receiver.
+ bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
+ base::TimeTicks* playout_time);
+
+ // Release frame - should be called following a GetCodedAudioFrame call.
+ // Should only be called from the main cast thread.
+ void ReleaseFrame(uint8 frame_id);
+
+ // Should only be called from the main cast thread.
+ void IncomingPacket(const uint8* packet, int length);
+
+ // Only used for testing.
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ rtcp_->set_clock(clock);
+ }
+
+ protected:
+ void IncomingParsedRtpPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header);
+ private:
+ friend class LocalRtpAudioData;
+ friend class LocalRtpAudioFeedback;
+
+ void CastFeedback(const RtcpCastMessage& cast_message);
+
+ // Actual decoding implementation - should be called under the audio decoder
+ // thread.
+ void DecodeAudioFrameThread(int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback callback);
+
+ // Return the playout time based on the current time and rtp timestamp.
+ base::TimeTicks GetPlayoutTime(base::TimeTicks now,
+ uint32 rtp_timestamp);
+
+ // Schedule the next RTCP report.
+ void ScheduleNextRtcpReport();
+
+ // Actually send the next RTCP report.
+ void SendNextRtcpReport();
+
+ scoped_refptr<CastThread> cast_thread_;
+ base::WeakPtrFactory<AudioReceiver> weak_factory_;
+
+ const AudioCodec codec_;
+ const uint32 incoming_ssrc_;
+ const int frequency_;
+ base::TimeDelta target_delay_delta_;
+ scoped_ptr<Framer> audio_buffer_;
+ scoped_refptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<LocalRtpAudioData> incoming_payload_callback_;
+ scoped_ptr<LocalRtpAudioFeedback> incoming_payload_feedback_;
+ scoped_ptr<RtpReceiver> rtp_receiver_;
+ scoped_ptr<Rtcp> rtcp_;
+ scoped_ptr<RtpReceiverStatistics> rtp_audio_receiver_statistics_;
+ base::TimeDelta time_offset_;
+
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_ \ No newline at end of file
diff --git a/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc b/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
new file mode 100644
index 00000000000..0cb564b1a40
--- /dev/null
+++ b/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -0,0 +1,95 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/audio_receiver/audio_receiver.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+static const int kPacketSize = 1500;
+static const int64 kStartMillisecond = 123456789;
+
+class PeerAudioReceiver : public AudioReceiver {
+ public:
+ PeerAudioReceiver(scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config,
+ PacedPacketSender* const packet_sender)
+ : AudioReceiver(cast_thread, audio_config, packet_sender) {
+ }
+ using AudioReceiver::IncomingParsedRtpPacket;
+};
+
+class AudioReceiverTest : public ::testing::Test {
+ protected:
+ AudioReceiverTest() {
+ // Configure the audio receiver to use PCM16.
+ audio_config_.rtp_payload_type = 127;
+ audio_config_.frequency = 16000;
+ audio_config_.channels = 1;
+ audio_config_.codec = kPcm16;
+ audio_config_.use_external_decoder = false;
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ }
+
+ void Configure(bool use_external_decoder) {
+ audio_config_.use_external_decoder = use_external_decoder;
+ receiver_.reset(new
+ PeerAudioReceiver(cast_thread_, audio_config_, &mock_transport_));
+ receiver_->set_clock(&testing_clock_);
+ }
+
+ ~AudioReceiverTest() {}
+
+ virtual void SetUp() {
+ payload_.assign(kPacketSize, 0);
+ // Always start with a key frame.
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 0;
+ rtp_header_.packet_id = 0;
+ rtp_header_.max_packet_id = 0;
+ rtp_header_.is_reference = false;
+ rtp_header_.reference_frame_id = 0;
+ }
+
+ AudioReceiverConfig audio_config_;
+ std::vector<uint8> payload_;
+ RtpCastHeader rtp_header_;
+ MockPacedPacketSender mock_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<PeerAudioReceiver> receiver_;
+ scoped_refptr<CastThread> cast_thread_;
+ base::SimpleTestTickClock testing_clock_;
+};
+
+TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
+ Configure(true);
+ receiver_->IncomingParsedRtpPacket(
+ payload_.data(), payload_.size(), rtp_header_);
+ EncodedAudioFrame audio_frame;
+ base::TimeTicks playout_time;
+ EXPECT_TRUE(receiver_->GetEncodedAudioFrame(&audio_frame, &playout_time));
+ EXPECT_EQ(0, audio_frame.frame_id);
+ EXPECT_EQ(kPcm16, audio_frame.codec);
+ task_runner_->RunTasks();
+}
+
+// TODO(mikhal): Add encoded frames.
+TEST_F(AudioReceiverTest, GetRawFrame) {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_encoder.cc b/chromium/media/cast/audio_sender/audio_encoder.cc
new file mode 100644
index 00000000000..175f82b3124
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_encoder.cc
@@ -0,0 +1,172 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/audio_sender/audio_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "third_party/webrtc/modules/interface/module_common_types.h"
+
+namespace media {
+namespace cast {
+
+// 48KHz, 2 channels and 100 ms.
+static const int kMaxNumberOfSamples = 48 * 2 * 100;
+
+// This class is only called from the cast audio encoder thread.
+class WebrtEncodedDataCallback : public webrtc::AudioPacketizationCallback {
+ public:
+ WebrtEncodedDataCallback(scoped_refptr<CastThread> cast_thread,
+ AudioCodec codec,
+ int frequency)
+ : codec_(codec),
+ frequency_(frequency),
+ cast_thread_(cast_thread),
+ last_timestamp_(0) {}
+
+ virtual int32 SendData(
+ webrtc::FrameType /*frame_type*/,
+ uint8 /*payload_type*/,
+ uint32 timestamp,
+ const uint8* payload_data,
+ uint16 payload_size,
+ const webrtc::RTPFragmentationHeader* /*fragmentation*/) {
+ scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
+ audio_frame->codec = codec_;
+ audio_frame->samples = timestamp - last_timestamp_;
+ DCHECK(audio_frame->samples <= kMaxNumberOfSamples);
+ last_timestamp_ = timestamp;
+ audio_frame->data.insert(audio_frame->data.begin(),
+ payload_data,
+ payload_data + payload_size);
+
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(*frame_encoded_callback_, base::Passed(&audio_frame),
+ recorded_time_));
+ return 0;
+ }
+
+ void SetEncodedCallbackInfo(
+ const base::TimeTicks& recorded_time,
+ const AudioEncoder::FrameEncodedCallback* frame_encoded_callback) {
+ recorded_time_ = recorded_time;
+ frame_encoded_callback_ = frame_encoded_callback;
+ }
+
+ private:
+ const AudioCodec codec_;
+ const int frequency_;
+ scoped_refptr<CastThread> cast_thread_;
+ uint32 last_timestamp_;
+ base::TimeTicks recorded_time_;
+ const AudioEncoder::FrameEncodedCallback* frame_encoded_callback_;
+};
+
+AudioEncoder::AudioEncoder(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config)
+ : cast_thread_(cast_thread),
+ audio_encoder_(webrtc::AudioCodingModule::Create(0)),
+ webrtc_encoder_callback_(
+ new WebrtEncodedDataCallback(cast_thread, audio_config.codec,
+ audio_config.frequency)),
+ timestamp_(0) { // Must start at 0; used above.
+
+ if (audio_encoder_->InitializeSender() != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ if (audio_encoder_->RegisterTransportCallback(
+ webrtc_encoder_callback_.get()) != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ webrtc::CodecInst send_codec;
+ send_codec.pltype = audio_config.rtp_payload_type;
+ send_codec.plfreq = audio_config.frequency;
+ send_codec.channels = audio_config.channels;
+
+ switch (audio_config.codec) {
+ case kOpus:
+ strncpy(send_codec.plname, "opus", sizeof(send_codec.plname));
+ send_codec.pacsize = audio_config.frequency / 50; // 20 ms
+ send_codec.rate = audio_config.bitrate; // 64000
+ break;
+ case kPcm16:
+ strncpy(send_codec.plname, "L16", sizeof(send_codec.plname));
+ send_codec.pacsize = audio_config.frequency / 100; // 10 ms
+ // TODO(pwestin) bug in webrtc; it should take audio_config.channels into
+ // account.
+ send_codec.rate = 8 * 2 * audio_config.frequency;
+ break;
+ default:
+ DCHECK(false) << "Codec must be specified for audio encoder";
+ return;
+ }
+ if (audio_encoder_->RegisterSendCodec(send_codec) != 0) {
+ DCHECK(false) << "Invalid webrtc return value; failed to register codec";
+ }
+}
+
+AudioEncoder::~AudioEncoder() {
+ webrtc::AudioCodingModule::Destroy(audio_encoder_);
+}
+
+// Called from main cast thread.
+void AudioEncoder::InsertRawAudioFrame(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback) {
+ cast_thread_->PostTask(CastThread::AUDIO_ENCODER, FROM_HERE,
+ base::Bind(&AudioEncoder::EncodeAudioFrameThread, this, audio_frame,
+ recorded_time, frame_encoded_callback, release_callback));
+}
+
+// Called from cast audio encoder thread.
+void AudioEncoder::EncodeAudioFrameThread(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback) {
+ int samples_per_10ms = audio_frame->frequency / 100;
+ int number_of_10ms_blocks = audio_frame->samples.size() /
+ (samples_per_10ms * audio_frame->channels);
+ DCHECK(webrtc::AudioFrame::kMaxDataSizeSamples > samples_per_10ms)
+ << "webrtc sanity check failed";
+
+ for (int i = 0; i < number_of_10ms_blocks; ++i) {
+ webrtc::AudioFrame webrtc_audio_frame;
+ webrtc_audio_frame.timestamp_ = timestamp_;
+
+ // Due to the webrtc::AudioFrame declaration we need to copy our data into
+ // the webrtc structure.
+ memcpy(&webrtc_audio_frame.data_[0],
+ &audio_frame->samples[i * samples_per_10ms * audio_frame->channels],
+ samples_per_10ms * audio_frame->channels * sizeof(int16));
+ webrtc_audio_frame.samples_per_channel_ = samples_per_10ms;
+ webrtc_audio_frame.sample_rate_hz_ = audio_frame->frequency;
+ webrtc_audio_frame.num_channels_ = audio_frame->channels;
+
+ // webrtc::AudioCodingModule is thread safe.
+ if (audio_encoder_->Add10MsData(webrtc_audio_frame) != 0) {
+ DCHECK(false) << "Invalid webrtc return value";
+ }
+ timestamp_ += samples_per_10ms;
+ }
+ // We are done with the audio frame; release it.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, release_callback);
+
+ // Note:
+ // Not all insert of 10 ms will generate a callback with encoded data.
+ webrtc_encoder_callback_->SetEncodedCallbackInfo(recorded_time,
+ &frame_encoded_callback);
+ for (int i = 0; i < number_of_10ms_blocks; ++i) {
+ audio_encoder_->Process();
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_encoder.h b/chromium/media/cast/audio_sender/audio_encoder.h
new file mode 100644
index 00000000000..8aacb0b4759
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_encoder.h
@@ -0,0 +1,63 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+#define MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace webrtc {
+class AudioCodingModule;
+}
+
+namespace media {
+namespace cast {
+
+class WebrtEncodedDataCallback;
+
+// Thread safe class.
+// It should be called from the main cast thread; however that is not required.
+class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
+ public:
+ typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
+ const base::TimeTicks&)> FrameEncodedCallback;
+
+ AudioEncoder(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config);
+
+ virtual ~AudioEncoder();
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure callback);
+
+ private:
+ void EncodeAudioFrameThread(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure release_callback);
+
+ scoped_refptr<CastThread> cast_thread_;
+ // Can't use scoped_ptr due to protected constructor within webrtc.
+ webrtc::AudioCodingModule* audio_encoder_;
+ scoped_ptr<WebrtEncodedDataCallback> webrtc_encoder_callback_;
+ uint32 timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
diff --git a/chromium/media/cast/audio_sender/audio_encoder_unittest.cc b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
new file mode 100644
index 00000000000..0b17f980569
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+static void RelaseFrame(const PcmAudioFrame* frame) {
+ delete frame;
+}
+
+static void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& recorded_time) {
+}
+
+class AudioEncoderTest : public ::testing::Test {
+ protected:
+ AudioEncoderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ AudioSenderConfig audio_config;
+ audio_config.codec = kOpus;
+ audio_config.use_external_encoder = false;
+ audio_config.frequency = 48000;
+ audio_config.channels = 2;
+ audio_config.bitrate = 64000;
+ audio_config.rtp_payload_type = 127;
+
+ audio_encoder_ = new AudioEncoder(cast_thread_, audio_config);
+ }
+
+ ~AudioEncoderTest() {}
+
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<AudioEncoder> audio_encoder_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(AudioEncoderTest, Encode20ms) {
+ PcmAudioFrame* audio_frame = new PcmAudioFrame();
+ audio_frame->channels = 2;
+ audio_frame->frequency = 48000;
+ audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
+
+ base::TimeTicks recorded_time;
+ audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&FrameEncoded),
+ base::Bind(&RelaseFrame, audio_frame));
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_sender.cc b/chromium/media/cast/audio_sender/audio_sender.cc
new file mode 100644
index 00000000000..39fccda6370
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_sender.cc
@@ -0,0 +1,168 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/audio_sender/audio_sender.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;
+
+class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
+ public:
+ explicit LocalRtcpAudioSenderFeedback(AudioSender* audio_sender)
+ : audio_sender_(audio_sender) {
+ }
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedReportBlock(
+ const RtcpReportBlock& report_block) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedIntraFrameRequest() OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+
+ virtual void OnReceivedRpsi(uint8 payload_type,
+ uint64 picture_id) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedNackRequest(
+ const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
+ DCHECK(false) << "Invalid callback";
+ }
+
+ virtual void OnReceivedCastFeedback(
+ const RtcpCastMessage& cast_feedback) OVERRIDE {
+ if (!cast_feedback.missing_frames_and_packets_.empty()) {
+ audio_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
+ }
+ VLOG(1) << "Received audio ACK "
+ << static_cast<int>(cast_feedback.ack_frame_id_);
+ }
+
+ private:
+ AudioSender* audio_sender_;
+};
+
+class LocalRtpSenderStatistics : public RtpSenderStatistics {
+ public:
+ explicit LocalRtpSenderStatistics(RtpSender* rtp_sender)
+ : rtp_sender_(rtp_sender) {
+ }
+
+ virtual void GetStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) OVERRIDE {
+ rtp_sender_->RtpStatistics(now, sender_info);
+ }
+
+ private:
+ RtpSender* rtp_sender_;
+};
+
+AudioSender::AudioSender(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ PacedPacketSender* const paced_packet_sender)
+ : incoming_feedback_ssrc_(audio_config.incoming_feedback_ssrc),
+ cast_thread_(cast_thread),
+ rtp_sender_(&audio_config, NULL, paced_packet_sender),
+ rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
+ rtp_audio_sender_statistics_(
+ new LocalRtpSenderStatistics(&rtp_sender_)),
+ rtcp_(rtcp_feedback_.get(),
+ paced_packet_sender,
+ rtp_audio_sender_statistics_.get(),
+ NULL,
+ audio_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
+ true,
+ audio_config.sender_ssrc,
+ audio_config.rtcp_c_name),
+ clock_(&default_tick_clock_),
+ weak_factory_(this) {
+
+ rtcp_.SetRemoteSSRC(audio_config.incoming_feedback_ssrc);
+
+ if (!audio_config.use_external_encoder) {
+ audio_encoder_ = new AudioEncoder(cast_thread, audio_config);
+ }
+ ScheduleNextRtcpReport();
+}
+
+AudioSender::~AudioSender() {}
+
+void AudioSender::InsertRawAudioFrame(
+ const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) {
+ DCHECK(audio_encoder_.get()) << "Invalid internal state";
+
+
+ audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&AudioSender::SendEncodedAudioFrame,
+ weak_factory_.GetWeakPtr()),
+ callback);
+}
+
+void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) {
+ DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ callback.Run();
+}
+
+void AudioSender::SendEncodedAudioFrame(
+ scoped_ptr<EncodedAudioFrame> audio_frame,
+ const base::TimeTicks& recorded_time) {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+}
+
+void AudioSender::ResendPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ rtp_sender_.ResendPackets(missing_frames_and_packets);
+}
+
+void AudioSender::IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback) {
+ rtcp_.IncomingRtcpPacket(packet, length);
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+}
+
+void AudioSender::ScheduleNextRtcpReport() {
+ base::TimeDelta time_to_next =
+ rtcp_.TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void AudioSender::SendRtcpReport() {
+ rtcp_.SendRtcpReport(incoming_feedback_ssrc_);
+ ScheduleNextRtcpReport();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_sender.gypi b/chromium/media/cast/audio_sender/audio_sender.gypi
new file mode 100644
index 00000000000..3e2a56345b8
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_sender.gypi
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'audio_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc',
+ ],
+ 'sources': [
+ 'audio_encoder.h',
+ 'audio_encoder.cc',
+ 'audio_sender.h',
+ 'audio_sender.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
+ '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ ],
+ },
+ ],
+}
+
+
diff --git a/chromium/media/cast/audio_sender/audio_sender.h b/chromium/media/cast/audio_sender/audio_sender.h
new file mode 100644
index 00000000000..3d389b381f0
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_sender.h
@@ -0,0 +1,100 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_AUDIO_SENDER_H_
+#define MEDIA_CAST_AUDIO_SENDER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+class AudioEncoder;
+class LocalRtcpAudioSenderFeedback;
+class LocalRtpSenderStatistics;
+class PacedPacketSender;
+
+// This class is not thread safe.
+// It's only called from the main cast thread.
+class AudioSender : public base::NonThreadSafe,
+ public base::SupportsWeakPtr<AudioSender> {
+ public:
+ AudioSender(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ PacedPacketSender* const paced_packet_sender);
+
+ virtual ~AudioSender();
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback);
+
+ // The audio_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback);
+
+ // Only called from the main cast thread.
+ void IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback);
+
+ // Only used for testing.
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ rtcp_.set_clock(clock);
+ rtp_sender_.set_clock(clock);
+ }
+
+ protected:
+ void SendEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
+ const base::TimeTicks& recorded_time);
+
+ private:
+ friend class LocalRtcpAudioSenderFeedback;
+
+ void ResendPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets);
+
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport();
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ base::WeakPtrFactory<AudioSender> weak_factory_;
+
+ const uint32 incoming_feedback_ssrc_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<AudioEncoder> audio_encoder_;
+ RtpSender rtp_sender_;
+ scoped_ptr<LocalRtpSenderStatistics> rtp_audio_sender_statistics_;
+ scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
+ Rtcp rtcp_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_AUDIO_SENDER_H_
+
diff --git a/chromium/media/cast/audio_sender/audio_sender_unittest.cc b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
new file mode 100644
index 00000000000..0b5e2176519
--- /dev/null
+++ b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/audio_sender/audio_sender.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using testing::_;
+
+static void RelaseFrame(const PcmAudioFrame* frame) {
+ delete frame;
+}
+
+class AudioSenderTest : public ::testing::Test {
+ protected:
+ AudioSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ AudioSenderConfig audio_config;
+ audio_config.codec = kOpus;
+ audio_config.use_external_encoder = false;
+ audio_config.frequency = 48000;
+ audio_config.channels = 2;
+ audio_config.bitrate = 64000;
+ audio_config.rtp_payload_type = 127;
+
+ audio_sender_.reset(
+ new AudioSender(cast_thread_, audio_config, &mock_transport_));
+ audio_sender_->set_clock(&testing_clock_);
+ }
+
+ ~AudioSenderTest() {}
+
+ base::SimpleTestTickClock testing_clock_;
+ MockPacedPacketSender mock_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<AudioSender> audio_sender_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(AudioSenderTest, Encode20ms) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+
+ PcmAudioFrame* audio_frame = new PcmAudioFrame();
+ audio_frame->channels = 2;
+ audio_frame->frequency = 48000;
+ audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
+
+ base::TimeTicks recorded_time;
+ audio_sender_->InsertRawAudioFrame(audio_frame, recorded_time,
+ base::Bind(&RelaseFrame, audio_frame));
+
+ task_runner_->RunTasks();
+}
+
+TEST_F(AudioSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+
+ // Make sure that we send at least one RTCP packet.
+ base::TimeDelta max_rtcp_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
+ testing_clock_.Advance(max_rtcp_timeout);
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast.gyp b/chromium/media/cast/cast.gyp
new file mode 100644
index 00000000000..230a2e1d0bd
--- /dev/null
+++ b/chromium/media/cast/cast.gyp
@@ -0,0 +1,90 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'include_tests%': 1,
+ },
+ 'targets': [
+ {
+ 'target_name': 'cast_config',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'cast_config.cc',
+ 'cast_config.h',
+ 'cast_thread.cc',
+ 'cast_thread.h',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_sender',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'cast_config',
+ 'cast_sender.gyp:cast_sender_impl',
+ ],
+ },
+ {
+ 'target_name': 'cast_receiver',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'cast_config',
+ 'cast_receiver.gyp:cast_receiver_impl',
+ ],
+ },
+ ], # targets,
+ 'conditions': [
+ ['include_tests==1', {
+ 'targets': [
+ {
+ 'target_name': 'cast_unittest',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'cast_sender',
+ 'cast_receiver',
+ 'rtcp/rtcp.gyp:cast_rtcp_test',
+ '<(DEPTH)/base/base.gyp:run_all_unittests',
+ '<(DEPTH)/net/net.gyp:net',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
+ 'sources': [
+ 'audio_receiver/audio_decoder_unittest.cc',
+ 'audio_receiver/audio_receiver_unittest.cc',
+ 'audio_sender/audio_encoder_unittest.cc',
+ 'audio_sender/audio_sender_unittest.cc',
+ 'congestion_control/congestion_control_unittest.cc',
+ 'framer/cast_message_builder_unittest.cc',
+ 'framer/frame_buffer_unittest.cc',
+ 'framer/framer_unittest.cc',
+ 'pacing/paced_sender_unittest.cc',
+ 'rtcp/rtcp_receiver_unittest.cc',
+ 'rtcp/rtcp_sender_unittest.cc',
+ 'rtcp/rtcp_unittest.cc',
+ 'rtp_receiver/receiver_stats_unittest.cc',
+ 'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
+ 'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
+ 'rtp_sender/packet_storage/packet_storage_unittest.cc',
+ 'rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
+ 'rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
+ 'rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
+ 'test/fake_task_runner.cc',
+ 'video_receiver/video_decoder_unittest.cc',
+ 'video_receiver/video_receiver_unittest.cc',
+ 'video_sender/video_encoder_unittest.cc',
+ 'video_sender/video_sender_unittest.cc',
+ ], # source
+ },
+ ], # targets
+ }], # include_tests
+ ],
+}
diff --git a/chromium/media/cast/cast_config.cc b/chromium/media/cast/cast_config.cc
new file mode 100644
index 00000000000..97c707353a8
--- /dev/null
+++ b/chromium/media/cast/cast_config.cc
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_config.h"
+
+namespace media {
+namespace cast {
+
+VideoSenderConfig::VideoSenderConfig()
+ : rtcp_interval(kDefaultRtcpIntervalMs),
+ rtcp_mode(kRtcpReducedSize),
+ rtp_history_ms(kDefaultRtpHistoryMs),
+ rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
+ congestion_control_back_off(kDefaultCongestionControlBackOff),
+ max_qp(kDefaultMaxQp),
+ min_qp(kDefaultMinQp),
+ max_frame_rate(kDefaultMaxFrameRate),
+ max_number_of_video_buffers_used(kDefaultNumberOfVideoBuffers) {}
+
+AudioSenderConfig::AudioSenderConfig()
+ : rtcp_interval(kDefaultRtcpIntervalMs),
+ rtcp_mode(kRtcpReducedSize),
+ rtp_history_ms(kDefaultRtpHistoryMs),
+ rtp_max_delay_ms(kDefaultRtpMaxDelayMs) {}
+
+AudioReceiverConfig::AudioReceiverConfig()
+ : rtcp_interval(kDefaultRtcpIntervalMs),
+ rtcp_mode(kRtcpReducedSize),
+ rtp_max_delay_ms(kDefaultRtpMaxDelayMs) {}
+
+VideoReceiverConfig::VideoReceiverConfig()
+ : rtcp_interval(kDefaultRtcpIntervalMs),
+ rtcp_mode(kRtcpReducedSize),
+ rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
+ max_frame_rate(kDefaultMaxFrameRate),
+ decoder_faster_than_max_frame_rate(true) {}
+
+EncodedVideoFrame::EncodedVideoFrame() {}
+EncodedVideoFrame::~EncodedVideoFrame() {}
+
+EncodedAudioFrame::EncodedAudioFrame() {}
+EncodedAudioFrame::~EncodedAudioFrame() {}
+
+PcmAudioFrame::PcmAudioFrame() {}
+PcmAudioFrame::~PcmAudioFrame() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast_config.h b/chromium/media/cast/cast_config.h
new file mode 100644
index 00000000000..988924aab45
--- /dev/null
+++ b/chromium/media/cast/cast_config.h
@@ -0,0 +1,218 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_CONFIG_H_
+#define MEDIA_CAST_CAST_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/cast/cast_defines.h"
+
+namespace media {
+namespace cast {
+
+enum RtcpMode {
+ kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
+ kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
+};
+
+enum VideoCodec {
+ kVp8,
+ kH264,
+ kExternalVideo,
+};
+
+enum AudioCodec {
+ kOpus,
+ kPcm16,
+ kExternalAudio,
+};
+
+struct AudioSenderConfig {
+ AudioSenderConfig();
+
+ uint32 sender_ssrc;
+ uint32 incoming_feedback_ssrc;
+
+ int rtcp_interval;
+ std::string rtcp_c_name;
+ RtcpMode rtcp_mode;
+
+ int rtp_history_ms; // The time RTP packets are stored for retransmissions.
+ int rtp_max_delay_ms;
+ int rtp_payload_type;
+
+ bool use_external_encoder;
+ int frequency;
+ int channels;
+ int bitrate;
+ AudioCodec codec;
+};
+
+struct VideoSenderConfig {
+ VideoSenderConfig();
+
+ uint32 sender_ssrc;
+ uint32 incoming_feedback_ssrc;
+
+ int rtcp_interval;
+ std::string rtcp_c_name;
+ RtcpMode rtcp_mode;
+
+ int rtp_history_ms; // The time RTP packets are stored for retransmissions.
+ int rtp_max_delay_ms;
+ int rtp_payload_type;
+
+ bool use_external_encoder;
+ int width; // Incoming frames will be scaled to this size.
+ int height;
+
+ float congestion_control_back_off;
+ int max_bitrate;
+ int min_bitrate;
+ int start_bitrate;
+ int max_qp;
+ int min_qp;
+ int max_frame_rate;
+ int max_number_of_video_buffers_used; // Max value depend on codec.
+ VideoCodec codec;
+ int number_of_cores;
+};
+
+struct AudioReceiverConfig {
+ AudioReceiverConfig();
+
+ uint32 feedback_ssrc;
+ uint32 incoming_ssrc;
+
+ int rtcp_interval;
+ std::string rtcp_c_name;
+ RtcpMode rtcp_mode;
+
+ // The time the receiver is prepared to wait for retransmissions.
+ int rtp_max_delay_ms;
+ int rtp_payload_type;
+
+ bool use_external_decoder;
+ int frequency;
+ int channels;
+ AudioCodec codec;
+};
+
+struct VideoReceiverConfig {
+ VideoReceiverConfig();
+
+ uint32 feedback_ssrc;
+ uint32 incoming_ssrc;
+
+ int rtcp_interval;
+ std::string rtcp_c_name;
+ RtcpMode rtcp_mode;
+
+ // The time the receiver is prepared to wait for retransmissions.
+ int rtp_max_delay_ms;
+ int rtp_payload_type;
+
+ bool use_external_decoder;
+ int max_frame_rate;
+
+ // Some HW decoders can not run faster than the frame rate, preventing it
+ // from catching up after a glitch.
+ bool decoder_faster_than_max_frame_rate;
+ VideoCodec codec;
+};
+
+struct I420VideoPlane {
+ int stride;
+ int length;
+ uint8* data;
+};
+
+struct I420VideoFrame {
+ int width;
+ int height;
+ I420VideoPlane y_plane;
+ I420VideoPlane u_plane;
+ I420VideoPlane v_plane;
+};
+
+struct EncodedVideoFrame {
+ EncodedVideoFrame();
+ ~EncodedVideoFrame();
+
+ VideoCodec codec;
+ bool key_frame;
+ uint8 frame_id;
+ uint8 last_referenced_frame_id;
+ std::vector<uint8> data;
+};
+
+struct PcmAudioFrame {
+ PcmAudioFrame();
+ ~PcmAudioFrame();
+
+ int channels; // Samples in interleaved stereo format. L0, R0, L1 ,R1 ,...
+ int frequency;
+ std::vector<int16> samples;
+};
+
+struct EncodedAudioFrame {
+ EncodedAudioFrame();
+ ~EncodedAudioFrame();
+
+ AudioCodec codec;
+ uint8 frame_id; // Needed to release the frame. Not used send side.
+ int samples; // Needed send side to advance the RTP timestamp.
+ // Not used receive side.
+ std::vector<uint8> data;
+};
+
+class PacketSender {
+ public:
+ // All packets to be sent to the network will be delivered via this function.
+ virtual bool SendPacket(const uint8* packet, int length) = 0;
+
+ virtual ~PacketSender() {}
+};
+
+class PacketReceiver : public base::RefCountedThreadSafe<PacketReceiver> {
+ public:
+ // All packets received from the network should be delivered via this
+ // function.
+ virtual void ReceivedPacket(const uint8* packet, int length,
+ const base::Closure callback) = 0;
+
+ virtual ~PacketReceiver() {}
+};
+
+class VideoEncoderController {
+ public:
+ // Inform the encoder about the new target bit rate.
+ virtual void SetBitRate(int new_bit_rate) = 0;
+
+ // Inform the encoder to not encode the next frame.
+ // Note: this setting is sticky and should last until called with false.
+ virtual void SkipNextFrame(bool skip_next_frame) = 0;
+
+ // Inform the encoder to encode the next frame as a key frame.
+ virtual void GenerateKeyFrame() = 0;
+
+ // Inform the encoder to only reference frames older or equal to frame_id;
+ virtual void LatestFrameIdToReference(uint8 frame_id) = 0;
+
+ // Query the codec about how many frames it has skipped due to slow ACK.
+ virtual int NumberOfSkippedFrames() const = 0;
+
+ protected:
+ virtual ~VideoEncoderController() {}
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_CONFIG_H_
diff --git a/chromium/media/cast/cast_defines.h b/chromium/media/cast/cast_defines.h
new file mode 100644
index 00000000000..13717323405
--- /dev/null
+++ b/chromium/media/cast/cast_defines.h
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_DEFINES_H_
+#define MEDIA_CAST_CAST_DEFINES_H_
+
+#include <map>
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+const int64 kDontShowTimeoutMs = 33;
+const float kDefaultCongestionControlBackOff = 0.875f;
+const uint8 kStartFrameId = 255;
+const uint32 kVideoFrequency = 90000;
+const int64 kSkippedFramesCheckPeriodkMs = 10000;
+
+// Number of skipped frames threshold in fps (as configured) per period above.
+const int kSkippedFramesThreshold = 3;
+const size_t kIpPacketSize = 1500;
+const int kStartRttMs = 20;
+const int64 kCastMessageUpdateIntervalMs = 33;
+const int64 kNackRepeatIntervalMs = 30;
+
+enum DefaultSettings {
+ kDefaultMaxQp = 56,
+ kDefaultMinQp = 4,
+ kDefaultMaxFrameRate = 30,
+ kDefaultNumberOfVideoBuffers = 1,
+ kDefaultRtcpIntervalMs = 500,
+ kDefaultRtpHistoryMs = 1000,
+ kDefaultRtpMaxDelayMs = 100,
+};
+
+const uint16 kRtcpCastAllPacketsLost = 0xffff;
+
+// Each uint16 represents one packet id within a cast frame.
+typedef std::set<uint16> PacketIdSet;
+// Each uint8 represents one cast frame.
+typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
+
+// TODO(pwestin): Re-factor the functions below into a class with static
+// methods.
+
+// Magic fractional unit. Used to convert time (in microseconds) to/from
+// fractional NTP seconds.
+static const double kMagicFractionalUnit = 4.294967296E3;
+
+// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
+// 1 January 1900.
+static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
+static const int64 kNtpEpochDeltaMicroseconds =
+ kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
+
+inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id != prev_frame_id) &&
+ static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
+}
+
+inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
+}
+
+inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
+ return (packet_id != prev_packet_id) &&
+ static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
+}
+
+inline bool IsNewerSequenceNumber(uint16 sequence_number,
+ uint16 prev_sequence_number) {
+ // Same function as IsNewerPacketId just different data and name.
+ return IsNewerPacketId(sequence_number, prev_sequence_number);
+}
+
+// Create an NTP diff from seconds and fractions of seconds; delay_fraction is
+// fractions of a second where 0x80000000 is half a second.
+inline uint32 ConvertToNtpDiff(uint32 delay_seconds, uint32 delay_fraction) {
+ return ((delay_seconds & 0x0000FFFF) << 16) +
+ ((delay_fraction & 0xFFFF0000) >> 16);
+}
+
+inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
+ uint32 delay_ms = (ntp_delay & 0x0000ffff) * 1000;
+ delay_ms >>= 16;
+ delay_ms += ((ntp_delay & 0xffff0000) >> 16) * 1000;
+ return base::TimeDelta::FromMilliseconds(delay_ms);
+}
+
+inline void ConvertTimeToFractions(int64 time_us,
+ uint32* seconds,
+ uint32* fractions) {
+ *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
+ *fractions = static_cast<uint32>(
+ (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
+}
+
+inline void ConvertTimeToNtp(const base::TimeTicks& time,
+ uint32* ntp_seconds,
+ uint32* ntp_fractions) {
+ int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
+ ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
+}
+
+inline base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
+ uint32 ntp_fractions) {
+ int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
+ base::Time::kMicrosecondsPerSecond;
+ ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+ return base::TimeTicks::FromInternalValue(ntp_time_us +
+ kNtpEpochDeltaMicroseconds);
+}
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_DEFINES_H_
diff --git a/chromium/media/cast/cast_receiver.gyp b/chromium/media/cast/cast_receiver.gyp
new file mode 100644
index 00000000000..539c41d89a9
--- /dev/null
+++ b/chromium/media/cast/cast_receiver.gyp
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'audio_receiver/audio_receiver.gypi',
+ 'video_receiver/video_receiver.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'cast_receiver_impl',
+ 'type': 'static_library',
+ 'sources': [
+ 'cast_receiver.h',
+# 'cast_receiver_impl.cc',
+# 'cast_receiver_impl.h',
+ ], # source
+ 'dependencies': [
+ 'rtp_receiver/rtp_receiver.gyp:*',
+ 'cast_audio_receiver',
+ 'cast_video_receiver',
+ 'framer/framer.gyp:cast_framer',
+ 'pacing/paced_sender.gyp:paced_sender',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/cast_receiver.h b/chromium/media/cast/cast_receiver.h
new file mode 100644
index 00000000000..a2eef765607
--- /dev/null
+++ b/chromium/media/cast/cast_receiver.h
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This is the main interface for the cast receiver. All configuration is done
+// at creation.
+
+#ifndef MEDIA_CAST_CAST_RECEIVER_H_
+#define MEDIA_CAST_CAST_RECEIVER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+
+namespace media {
+namespace cast {
+// Callback in which the raw audio frame and render time will be returned
+// once decoding is complete.
+typedef base::Callback<void(scoped_ptr<PcmAudioFrame>,
+ const base::TimeTicks)> AudioFrameDecodedCallback;
+
+// Callback in which the raw frame and render time will be returned once
+// decoding is complete.
+typedef base::Callback<void(scoped_ptr<I420VideoFrame>,
+ const base::TimeTicks)> VideoFrameDecodedCallback;
+
+// This Class is thread safe.
+class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver>{
+ public:
+ virtual bool GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
+
+ virtual bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
+ base::TimeTicks* render_time) = 0;
+
+ virtual void ReleaseEncodedVideoFrame(uint8 frame_id) = 0;
+
+ virtual bool GetRawAudioFrame(int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback callback) = 0;
+
+ virtual bool GetCodedAudioFrame(EncodedAudioFrame* audio_frame,
+ base::TimeTicks* playout_time) = 0;
+
+ virtual void ReleaseCodedAudioFrame(uint8 frame_id) = 0;
+
+ virtual ~FrameReceiver() {}
+};
+
+// This Class is thread safe.
+class CastReceiver {
+ public:
+ static CastReceiver* CreateCastReceiver(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioReceiverConfig& audio_config,
+ const VideoReceiverConfig& video_config,
+ PacketSender* const packet_sender);
+
+ // All received RTP and RTCP packets for the call should be inserted to this
+ // PacketReceiver.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
+
+ // Polling interface to get audio and video frames from the CastReceiver.
+ virtual scoped_refptr<FrameReceiver> frame_receiver() = 0;
+
+ virtual ~CastReceiver() {};
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_RECEIVER_H_
diff --git a/chromium/media/cast/cast_sender.gyp b/chromium/media/cast/cast_sender.gyp
new file mode 100644
index 00000000000..fe99f803820
--- /dev/null
+++ b/chromium/media/cast/cast_sender.gyp
@@ -0,0 +1,35 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'audio_sender/audio_sender.gypi',
+ 'congestion_control/congestion_control.gypi',
+ 'video_sender/video_sender.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'cast_sender_impl',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
+ 'sources': [
+ 'cast_sender.h',
+ 'cast_sender_impl.cc',
+ 'cast_sender_impl.h',
+ ], # source
+ 'dependencies': [
+ 'audio_sender',
+ 'congestion_control',
+ 'pacing/paced_sender.gyp:paced_sender',
+ 'rtcp/rtcp.gyp:cast_rtcp',
+ 'rtp_sender/rtp_sender.gyp:cast_rtp_sender',
+ 'video_sender',
+ ], # dependencies
+ },
+ ],
+}
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
new file mode 100644
index 00000000000..f4d36539b44
--- /dev/null
+++ b/chromium/media/cast/cast_sender.h
@@ -0,0 +1,89 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This is the main interface for the cast sender. All configuration is done
+// at creation.
+//
+// The FrameInput and PacketReceiver interfaces should normally be accessed from
+// the IO thread. However they are allowed to be called from any thread.
+
+#ifndef MEDIA_CAST_CAST_SENDER_H_
+#define MEDIA_CAST_CAST_SENDER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+
+namespace media {
+namespace cast {
+
+// This Class is thread safe.
+class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
+ public:
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
+
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
+
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
+
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
+
+ virtual ~FrameInput() {}
+};
+
+// This Class is thread safe.
+// The provided PacketSender object will always be called from the main cast
+// thread.
+class CastSender {
+ public:
+ static CastSender* CreateCastSender(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender);
+
+ virtual ~CastSender() {};
+
+ // All audio and video frames for the session should be inserted to this
+ // object.
+ // Can be called from any thread.
+ virtual scoped_refptr<FrameInput> frame_input() = 0;
+
+ // All RTCP packets for the session should be inserted to this object.
+ // Can be called from any thread.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_SENDER_H_
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
new file mode 100644
index 00000000000..76f2f997651
--- /dev/null
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -0,0 +1,176 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "media/cast/cast_sender_impl.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+
+// The LocalFrameInput class posts all incoming frames (audio and video) to the
+// main cast thread for processing.
+// This makes the cast sender interface thread safe.
+class LocalFrameInput : public FrameInput {
+ public:
+ LocalFrameInput(scoped_refptr<CastThread> cast_thread,
+ base::WeakPtr<AudioSender> audio_sender,
+ base::WeakPtr<VideoSender> video_sender)
+ : cast_thread_(cast_thread),
+ audio_sender_(audio_sender),
+ video_sender_(video_sender) {}
+
+ virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::InsertRawVideoFrame, video_sender_,
+ video_frame, capture_time, callback));
+ }
+
+ virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::InsertCodedVideoFrame, video_sender_,
+ video_frame, capture_time, callback));
+ }
+
+ virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::InsertRawAudioFrame, audio_sender_,
+ audio_frame, recorded_time, callback));
+ }
+
+ virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) OVERRIDE {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::InsertCodedAudioFrame, audio_sender_,
+ audio_frame, recorded_time, callback));
+ }
+
+ private:
+ scoped_refptr<CastThread> cast_thread_;
+ base::WeakPtr<AudioSender> audio_sender_;
+ base::WeakPtr<VideoSender> video_sender_;
+};
+
+// LocalCastSenderPacketReceiver handles the incoming packets to the cast sender;
+// it's only expected to receive RTCP feedback packets from the remote cast
+// receiver. The class verifies that it is an RTCP packet and based on the
+// SSRC of the incoming packet route the packet to the correct sender; audio or
+// video.
+//
+// Definition of SSRC as defined in RFC 3550.
+// Synchronization source (SSRC): The source of a stream of RTP
+// packets, identified by a 32-bit numeric SSRC identifier carried in
+// the RTP header so as not to be dependent upon the network address.
+// All packets from a synchronization source form part of the same
+// timing and sequence number space, so a receiver groups packets by
+// synchronization source for playback. Examples of synchronization
+// sources include the sender of a stream of packets derived from a
+// signal source such as a microphone or a camera, or an RTP mixer
+// (see below). A synchronization source may change its data format,
+// e.g., audio encoding, over time. The SSRC identifier is a
+// randomly chosen value meant to be globally unique within a
+// particular RTP session (see Section 8). A participant need not
+// use the same SSRC identifier for all the RTP sessions in a
+// multimedia session; the binding of the SSRC identifiers is
+// provided through RTCP (see Section 6.5.1). If a participant
+// generates multiple streams in one RTP session, for example from
+// separate video cameras, each MUST be identified as a different
+// SSRC.
+
+class LocalCastSenderPacketReceiver : public PacketReceiver {
+ public:
+ LocalCastSenderPacketReceiver(scoped_refptr<CastThread> cast_thread,
+ base::WeakPtr<AudioSender> audio_sender,
+ base::WeakPtr<VideoSender> video_sender,
+ uint32 ssrc_of_audio_sender,
+ uint32 ssrc_of_video_sender)
+ : cast_thread_(cast_thread),
+ audio_sender_(audio_sender),
+ video_sender_(video_sender),
+ ssrc_of_audio_sender_(ssrc_of_audio_sender),
+ ssrc_of_video_sender_(ssrc_of_video_sender) {}
+
+ virtual ~LocalCastSenderPacketReceiver() {}
+
+ virtual void ReceivedPacket(const uint8* packet,
+ int length,
+ const base::Closure callback) OVERRIDE {
+ if (!Rtcp::IsRtcpPacket(packet, length)) {
+ // We should have no incoming RTP packets.
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Unexpectedly received a RTP packet in the cast sender";
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ return;
+ }
+ uint32 ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
+ if (ssrc_of_sender == ssrc_of_audio_sender_) {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::IncomingRtcpPacket, audio_sender_,
+ packet, length, callback));
+ } else if (ssrc_of_sender == ssrc_of_video_sender_) {
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&VideoSender::IncomingRtcpPacket, video_sender_,
+ packet, length, callback));
+ } else {
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Received a RTCP packet with a non matching sender SSRC "
+ << ssrc_of_sender;
+
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ }
+ }
+
+ private:
+ scoped_refptr<CastThread> cast_thread_;
+ base::WeakPtr<AudioSender> audio_sender_;
+ base::WeakPtr<VideoSender> video_sender_;
+ uint32 ssrc_of_audio_sender_;
+ uint32 ssrc_of_video_sender_;
+};
+
+CastSender* CastSender::CreateCastSender(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender) {
+ return new CastSenderImpl(cast_thread,
+ audio_config,
+ video_config,
+ video_encoder_controller,
+ packet_sender);
+}
+
+CastSenderImpl::CastSenderImpl(
+ scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender)
+ : pacer_(cast_thread, packet_sender),
+ audio_sender_(cast_thread, audio_config, &pacer_),
+ video_sender_(cast_thread, video_config, video_encoder_controller,
+ &pacer_),
+ frame_input_(new LocalFrameInput(cast_thread, audio_sender_.AsWeakPtr(),
+ video_sender_.AsWeakPtr())),
+ packet_receiver_(new LocalCastSenderPacketReceiver(cast_thread,
+ audio_sender_.AsWeakPtr(), video_sender_.AsWeakPtr(),
+ audio_config.incoming_feedback_ssrc,
+ video_config.incoming_feedback_ssrc)) {}
+
+CastSenderImpl::~CastSenderImpl() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast_sender_impl.h b/chromium/media/cast/cast_sender_impl.h
new file mode 100644
index 00000000000..eb19caa247b
--- /dev/null
+++ b/chromium/media/cast/cast_sender_impl.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef MEDIA_CAST_CAST_SENDER_IMPL_H_
+#define MEDIA_CAST_CAST_SENDER_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/audio_sender/audio_sender.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_sender.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/video_sender.h"
+
+namespace media {
+namespace cast {
+
+class AudioSender;
+class PacedSender;
+class VideoSender;
+
+// This class is a pure owner class that groups all required sending objects
+// together such as pacer, packet receiver, frame input, audio and video sender.
+class CastSenderImpl : public CastSender {
+ public:
+ CastSenderImpl(scoped_refptr<CastThread> cast_thread,
+ const AudioSenderConfig& audio_config,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacketSender* const packet_sender);
+
+ virtual ~CastSenderImpl();
+
+ virtual scoped_refptr<FrameInput> frame_input() OVERRIDE {
+ return frame_input_;
+ }
+
+ virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE {
+ return packet_receiver_;
+ }
+
+ private:
+ PacedSender pacer_;
+ AudioSender audio_sender_;
+ VideoSender video_sender_;
+ scoped_refptr<FrameInput> frame_input_;
+ scoped_refptr<PacketReceiver> packet_receiver_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_SENDER_IMPL_H_
+
diff --git a/chromium/media/cast/cast_thread.cc b/chromium/media/cast/cast_thread.cc
new file mode 100644
index 00000000000..4d294c46568
--- /dev/null
+++ b/chromium/media/cast/cast_thread.cc
@@ -0,0 +1,64 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_thread.h"
+
+#include "base/logging.h"
+
+using base::TaskRunner;
+
+namespace media {
+namespace cast {
+
+CastThread::CastThread(
+ scoped_refptr<TaskRunner> main_thread_proxy,
+ scoped_refptr<TaskRunner> audio_encode_thread_proxy,
+ scoped_refptr<TaskRunner> audio_decode_thread_proxy,
+ scoped_refptr<TaskRunner> video_encode_thread_proxy,
+ scoped_refptr<TaskRunner> video_decode_thread_proxy)
+ : main_thread_proxy_(main_thread_proxy),
+ audio_encode_thread_proxy_(audio_encode_thread_proxy),
+ audio_decode_thread_proxy_(audio_decode_thread_proxy),
+ video_encode_thread_proxy_(video_encode_thread_proxy),
+ video_decode_thread_proxy_(video_decode_thread_proxy) {
+ DCHECK(main_thread_proxy) << "Main thread required";
+}
+
+bool CastThread::PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ scoped_refptr<TaskRunner> task_runner =
+ GetMessageTaskRunnerForThread(identifier);
+
+ return task_runner->PostTask(from_here, task);
+}
+
+bool CastThread::PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ scoped_refptr<TaskRunner> task_runner =
+ GetMessageTaskRunnerForThread(identifier);
+
+ return task_runner->PostDelayedTask(from_here, task, delay);
+}
+
+scoped_refptr<TaskRunner> CastThread::GetMessageTaskRunnerForThread(
+ ThreadId identifier) {
+ switch (identifier) {
+ case CastThread::MAIN:
+ return main_thread_proxy_;
+ case CastThread::AUDIO_ENCODER:
+ return audio_encode_thread_proxy_;
+ case CastThread::AUDIO_DECODER:
+ return audio_decode_thread_proxy_;
+ case CastThread::VIDEO_ENCODER:
+ return video_encode_thread_proxy_;
+ case CastThread::VIDEO_DECODER:
+ return video_decode_thread_proxy_;
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast_thread.h b/chromium/media/cast/cast_thread.h
new file mode 100644
index 00000000000..b004157042e
--- /dev/null
+++ b/chromium/media/cast/cast_thread.h
@@ -0,0 +1,71 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_THREAD_H_
+#define MEDIA_CAST_CAST_THREAD_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+class CastThread : public base::RefCountedThreadSafe<CastThread> {
+ public:
+ // An enumeration of the cast threads.
+ enum ThreadId {
+ // The main thread is where the cast system is configured and where timers
+ // and network IO is performed.
+ MAIN,
+ // The audio encoder thread is where all send side audio processing is done,
+ // primarily encoding but also re-sampling.
+ AUDIO_ENCODER,
+ // The audio decoder thread is where all receive side audio processing is
+ // done, primarily decoding but also error concealment and re-sampling.
+ AUDIO_DECODER,
+ // The video encoder thread is where the video encode processing is done.
+ VIDEO_ENCODER,
+ // The video decoder thread is where the video decode processing is done.
+ VIDEO_DECODER,
+ };
+
+ CastThread(scoped_refptr<base::TaskRunner> main_thread_proxy,
+ scoped_refptr<base::TaskRunner> audio_encode_thread_proxy,
+ scoped_refptr<base::TaskRunner> audio_decode_thread_proxy,
+ scoped_refptr<base::TaskRunner> video_encode_thread_proxy,
+ scoped_refptr<base::TaskRunner> video_decode_thread_proxy);
+
+ // These are the same methods in message_loop.h, but are guaranteed to either
+ // get posted to the MessageLoop if it's still alive, or be deleted otherwise.
+ // They return true iff the thread existed and the task was posted. Note that
+ // even if the task is posted, there's no guarantee that it will run, since
+ // the target thread may already have a Quit message in its queue.
+ bool PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task);
+
+ bool PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay);
+
+ private:
+ scoped_refptr<base::TaskRunner> GetMessageTaskRunnerForThread(
+ ThreadId identifier);
+
+ scoped_refptr<base::TaskRunner> main_thread_proxy_;
+ scoped_refptr<base::TaskRunner> audio_encode_thread_proxy_;
+ scoped_refptr<base::TaskRunner> audio_decode_thread_proxy_;
+ scoped_refptr<base::TaskRunner> video_encode_thread_proxy_;
+ scoped_refptr<base::TaskRunner> video_decode_thread_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastThread);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_THREAD_H_
diff --git a/chromium/media/cast/congestion_control/congestion_control.cc b/chromium/media/cast/congestion_control/congestion_control.cc
new file mode 100644
index 00000000000..f8ca98c2c9a
--- /dev/null
+++ b/chromium/media/cast/congestion_control/congestion_control.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/congestion_control/congestion_control.h"
+
+#include "base/logging.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kCongestionControlMinChangeIntervalMs = 10;
+static const int64 kCongestionControlMaxChangeIntervalMs = 100;
+
+// At 10 ms RTT TCP Reno would ramp 1500 * 8 * 100 = 1200 Kbit/s.
+// NACK is sent after a maximum of 10 ms.
+static const int kCongestionControlMaxBitrateIncreasePerMillisecond = 1200;
+
+static const int64 kMaxElapsedTimeMs = kCongestionControlMaxChangeIntervalMs;
+
+// Constructs an AIMD (additive-increase / multiplicative-decrease)
+// controller.  |congestion_control_back_off| is the factor applied to the
+// bitrate on NACK and must lie strictly in (0, 1).  The controlled bitrate
+// starts at |start_bitrate| and is kept within
+// [min_bitrate_configured, max_bitrate_configured] (ordering DCHECKed
+// below).  Time is read through |clock_|, which defaults to a real
+// DefaultTickClock until overridden via set_clock().
+CongestionControl::CongestionControl(float congestion_control_back_off,
+ uint32 max_bitrate_configured,
+ uint32 min_bitrate_configured,
+ uint32 start_bitrate)
+ : congestion_control_back_off_(congestion_control_back_off),
+ max_bitrate_configured_(max_bitrate_configured),
+ min_bitrate_configured_(min_bitrate_configured),
+ bitrate_(start_bitrate),
+ default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()) {
+ DCHECK_GT(congestion_control_back_off, 0.0f) << "Invalid config";
+ DCHECK_LT(congestion_control_back_off, 1.0f) << "Invalid config";
+ DCHECK_GE(max_bitrate_configured, min_bitrate_configured) << "Invalid config";
+ DCHECK_GE(max_bitrate_configured, start_bitrate) << "Invalid config";
+ DCHECK_GE(start_bitrate, min_bitrate_configured) << "Invalid config";
+}
+
+// Additive increase, called on ACK feedback.  Returns true and writes the
+// raised bitrate to |*new_bitrate| when the bitrate changed; returns false
+// on the very first feedback (which only arms the timestamps), when
+// already at the configured maximum, or when less than one change
+// interval has elapsed since the last increase.
+bool CongestionControl::OnAck(base::TimeDelta rtt, uint32* new_bitrate) {
+ base::TimeTicks now = clock_->NowTicks();
+
+ // First feedback?
+ if (time_last_increase_.is_null()) {
+ time_last_increase_ = now;
+ time_last_decrease_ = now;
+ return false;
+ }
+ // Are we at the max bitrate?
+ if (max_bitrate_configured_ == bitrate_) return false;
+
+ // Make sure RTT is never less than 1 ms (it is used as a divisor below).
+ rtt = std::max(rtt, base::TimeDelta::FromMilliseconds(1));
+
+ // Cap the elapsed time so a long silent period cannot yield a huge
+ // one-shot increase, and clamp the change interval to
+ // [kCongestionControlMinChangeIntervalMs,
+ //  kCongestionControlMaxChangeIntervalMs].
+ base::TimeDelta elapsed_time = std::min(now - time_last_increase_,
+ base::TimeDelta::FromMilliseconds(kMaxElapsedTimeMs));
+ base::TimeDelta change_interval = std::max(rtt,
+ base::TimeDelta::FromMilliseconds(kCongestionControlMinChangeIntervalMs));
+ change_interval = std::min(change_interval,
+ base::TimeDelta::FromMilliseconds(kCongestionControlMaxChangeIntervalMs));
+
+ // Has enough time passed?
+ if (elapsed_time < change_interval) return false;
+
+ time_last_increase_ = now;
+
+ // One packet per RTT multiplied by the elapsed time fraction.
+ // 1500 * 8 * (1000 / rtt_ms) * (elapsed_time_ms / 1000) =>
+ // 1500 * 8 * elapsed_time_ms / rtt_ms.
+ uint32 bitrate_increase = (1500 * 8 * elapsed_time.InMilliseconds()) /
+ rtt.InMilliseconds();
+ uint32 max_bitrate_increase =
+ kCongestionControlMaxBitrateIncreasePerMillisecond *
+ elapsed_time.InMilliseconds();
+ bitrate_increase = std::min(max_bitrate_increase, bitrate_increase);
+ *new_bitrate = std::min(bitrate_increase + bitrate_, max_bitrate_configured_);
+ bitrate_ = *new_bitrate;
+ return true;
+}
+
+// Multiplicative decrease, called on NACK feedback.  Returns true and
+// writes the backed-off bitrate to |*new_bitrate| when the bitrate
+// changed; returns false on the very first feedback or when less than one
+// change interval has elapsed since the last decrease.
+bool CongestionControl::OnNack(base::TimeDelta rtt, uint32* new_bitrate) {
+ base::TimeTicks now = clock_->NowTicks();
+
+ // First feedback?
+ if (time_last_decrease_.is_null()) {
+ time_last_increase_ = now;
+ time_last_decrease_ = now;
+ return false;
+ }
+ base::TimeDelta elapsed_time = std::min(now - time_last_decrease_,
+ base::TimeDelta::FromMilliseconds(kMaxElapsedTimeMs));
+ base::TimeDelta change_interval = std::max(rtt,
+ base::TimeDelta::FromMilliseconds(kCongestionControlMinChangeIntervalMs));
+ change_interval = std::min(change_interval,
+ base::TimeDelta::FromMilliseconds(kCongestionControlMaxChangeIntervalMs));
+
+ // Has enough time passed?
+ if (elapsed_time < change_interval) return false;
+
+ // Resetting the increase timestamp as well guarantees a full change
+ // interval goes by before OnAck() can ramp the bitrate back up.
+ time_last_decrease_ = now;
+ time_last_increase_ = now;
+
+ *new_bitrate = std::max(
+ static_cast<uint32>(bitrate_ * congestion_control_back_off_),
+ min_bitrate_configured_);
+
+ bitrate_ = *new_bitrate;
+ return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/congestion_control/congestion_control.gypi b/chromium/media/cast/congestion_control/congestion_control.gypi
new file mode 100644
index 00000000000..9f1accf3f27
--- /dev/null
+++ b/chromium/media/cast/congestion_control/congestion_control.gypi
@@ -0,0 +1,24 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'congestion_control',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'congestion_control.h',
+ 'congestion_control.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ ],
+ },
+ ],
+}
+
diff --git a/chromium/media/cast/congestion_control/congestion_control.h b/chromium/media/cast/congestion_control/congestion_control.h
new file mode 100644
index 00000000000..f1b9b280dcc
--- /dev/null
+++ b/chromium/media/cast/congestion_control/congestion_control.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
+#define MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// Rate controller with TCP-Reno-like behavior: the bitrate is increased
+// additively on ACK (OnAck) and backed off multiplicatively on NACK
+// (OnNack), clamped to the configured [min, max] range.
+class CongestionControl {
+ public:
+ // |congestion_control_back_off| must be in (0, 1); the bitrates must
+ // satisfy min <= start <= max (DCHECKed in the constructor).
+ CongestionControl(float congestion_control_back_off,
+ uint32 max_bitrate_configured,
+ uint32 min_bitrate_configured,
+ uint32 start_bitrate);
+
+ virtual ~CongestionControl() {}
+
+ // Don't call OnAck if the same message contain a NACK.
+ // Returns true if the bitrate has changed.
+ bool OnAck(base::TimeDelta rtt_ms, uint32* new_bitrate);
+
+ // Returns true if the bitrate has changed.
+ bool OnNack(base::TimeDelta rtt_ms, uint32* new_bitrate);
+
+ // Replaces the internal tick clock (used by tests to inject a
+ // SimpleTestTickClock).  The caller retains ownership of |clock|, which
+ // must outlive this object.
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ private:
+ const float congestion_control_back_off_;
+ const uint32 max_bitrate_configured_;
+ const uint32 min_bitrate_configured_;
+ uint32 bitrate_;  // Current bitrate; updated by OnAck()/OnNack().
+ base::TimeTicks time_last_increase_;
+ base::TimeTicks time_last_decrease_;
+
+ // Owns the default clock; |clock_| points at it unless overridden via
+ // set_clock().
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(CongestionControl);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
diff --git a/chromium/media/cast/congestion_control/congestion_control_unittest.cc b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
new file mode 100644
index 00000000000..eff0a8c1e6f
--- /dev/null
+++ b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
@@ -0,0 +1,139 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/congestion_control/congestion_control.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kMaxBitrateConfigured = 5000000;
+static const uint32 kMinBitrateConfigured = 500000;
+static const uint32 kStartBitrate = 2000000;
+static const int64 kStartMillisecond = 123456789;
+static const int64 kRttMs = 20;
+static const int64 kAckRateMs = 33;
+static const int64 kNackRateMs = 10;
+
+// Fixture: a controller with the default back-off and the test bitrate
+// bounds, driven by a SimpleTestTickClock advanced to an arbitrary
+// non-zero start time and injected via set_clock().
+class CongestionControlTest : public ::testing::Test {
+ protected:
+ CongestionControlTest()
+ : congestion_control_(kDefaultCongestionControlBackOff,
+ kMaxBitrateConfigured,
+ kMinBitrateConfigured,
+ kStartBitrate) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ congestion_control_.set_clock(&testing_clock_);
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ CongestionControl congestion_control_;
+};
+
+// Ramping up on ACKs must saturate exactly at the configured max bitrate.
+TEST_F(CongestionControlTest, Max) {
+ uint32 new_bitrate = 0;
+ base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+
+ uint32 expected_increase_bitrate = 0;
+
+ // Expected time is 5 seconds. 500000 - 2000000 = 5 * 1500 * 8 * (1000 / 20).
+ for (int i = 0; i < 151; ++i) {
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_increase_bitrate += 1500 * 8 * kAckRateMs / kRttMs;
+ EXPECT_EQ(kStartBitrate + expected_increase_bitrate, new_bitrate);
+ }
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ EXPECT_EQ(kMaxBitrateConfigured, new_bitrate);
+}
+
+// Backing off on NACKs must floor exactly at the configured min bitrate.
+TEST_F(CongestionControlTest, Min) {
+ uint32 new_bitrate = 0;
+ base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
+
+ uint32 expected_decrease_bitrate = kStartBitrate;
+
+ // Expected number is 10. 2000 * 0.875^10 <= 500.
+ for (int i = 0; i < 10; ++i) {
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ expected_decrease_bitrate = static_cast<uint32>(
+ expected_decrease_bitrate * kDefaultCongestionControlBackOff);
+ EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
+ }
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
+}
+
+// Verifies the rate-change interval gating: changes are applied only once
+// per (clamped) change interval, with elapsed time capped at 100 ms.
+TEST_F(CongestionControlTest, Timing) {
+ base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ uint32 new_bitrate = 0;
+ uint32 expected_bitrate = kStartBitrate;
+
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_bitrate += 1500 * 8 * kAckRateMs / kRttMs;
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ // We should back off immediately.
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ expected_bitrate = static_cast<uint32>(
+ expected_bitrate * kDefaultCongestionControlBackOff);
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ // Less than one RTT has passed; don't back off again.
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ expected_bitrate = static_cast<uint32>(
+ expected_bitrate * kDefaultCongestionControlBackOff);
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_bitrate += 1500 * 8 * 20 / kRttMs;
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_bitrate += 1500 * 8 * 20 / kRttMs;
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ // Test long elapsed time (300 ms).
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(300));
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_bitrate += 1500 * 8 * 100 / kRttMs;
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+
+ // Test many short elapsed time (1 ms).
+ for (int i = 0; i < 19; ++i) {
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+ }
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
+ EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
+ expected_bitrate += 1500 * 8 * 20 / kRttMs;
+ EXPECT_EQ(expected_bitrate, new_bitrate);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/cast_message_builder.cc b/chromium/media/cast/framer/cast_message_builder.cc
new file mode 100644
index 00000000000..eec12112e03
--- /dev/null
+++ b/chromium/media/cast/framer/cast_message_builder.cc
@@ -0,0 +1,193 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/framer/cast_message_builder.h"
+
+namespace media {
+namespace cast {
+
+// Sentinel packet id placed in a frame's NACK set to signal that no packet
+// of the frame has been received at all (see BuildPacketList()).
+static const uint16 kCompleteFrameLost = 0xffff;
+
+// |incoming_payload_feedback| and |frame_id_map| are not owned and must
+// outlive this builder.  When |decoder_faster_than_max_frame_rate| is
+// false, ACKs are throttled ("slowing down") once more than
+// |max_unacked_frames| complete frames are pending in the framer.  The
+// builder starts out waiting for a key frame, with the ACK id set to
+// kStartFrameId.
+CastMessageBuilder::CastMessageBuilder(
+ RtpPayloadFeedback* incoming_payload_feedback,
+ FrameIdMap* frame_id_map,
+ uint32 media_ssrc,
+ bool decoder_faster_than_max_frame_rate,
+ int max_unacked_frames)
+ : cast_feedback_(incoming_payload_feedback),
+ frame_id_map_(frame_id_map),
+ media_ssrc_(media_ssrc),
+ decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
+ max_unacked_frames_(max_unacked_frames),
+ cast_msg_(media_ssrc),
+ waiting_for_key_frame_(true),
+ slowing_down_ack_(false),
+ acked_last_frame_(true),
+ last_acked_frame_id_(kStartFrameId),
+ default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()) {
+ cast_msg_.ack_frame_id_ = kStartFrameId;
+}
+
+CastMessageBuilder::~CastMessageBuilder() {}
+
+// Called when the last packet of |frame_id| has arrived.  While waiting
+// for a key frame, complete delta frames are ignored; the first complete
+// key frame clears the NACK state, becomes the ACK id, and is reported.
+// In steady state a feedback message is sent only when the ACK advances
+// (see UpdateAckMessage()).
+void CastMessageBuilder::CompleteFrameReceived(uint8 frame_id,
+ bool is_key_frame) {
+ if (last_update_time_.is_null()) {
+ // Our first update.
+ last_update_time_ = clock_->NowTicks();
+ }
+ if (waiting_for_key_frame_) {
+ if (!is_key_frame) {
+ // Ignore that we have received this complete frame since we are
+ // waiting on a key frame.
+ return;
+ }
+ waiting_for_key_frame_ = false;
+ cast_msg_.missing_frames_and_packets_.clear();
+ cast_msg_.ack_frame_id_ = frame_id;
+ last_update_time_ = clock_->NowTicks();
+ // We might have other complete frames waiting after we receive the last
+ // packet in the key-frame.
+ UpdateAckMessage();
+ } else {
+ if (!UpdateAckMessage())
+ return;
+
+ BuildPacketList();
+ }
+ // Send cast message.
+ cast_feedback_->CastFeedback(cast_msg_);
+}
+
+// Advances the ACK id carried in |cast_msg_| and refreshes the update
+// timestamp.  In normal mode the latest continuous frame is acknowledged
+// and false is returned when that frame was already acked (nothing new to
+// report).  In slow-down mode (entered when the framer holds more than
+// |max_unacked_frames_| complete frames and the decoder is not faster
+// than the max frame rate) the ACK id only advances on every other call.
+bool CastMessageBuilder::UpdateAckMessage() {
+ if (!decoder_faster_than_max_frame_rate_) {
+ int complete_frame_count = frame_id_map_->NumberOfCompleteFrames();
+ if (complete_frame_count > max_unacked_frames_) {
+ // We have too many frames pending in our framer; slow down ACK.
+ slowing_down_ack_ = true;
+ } else if (complete_frame_count <= 1) {
+ // We are down to one or less frames in our framer; ACK normally.
+ slowing_down_ack_ = false;
+ }
+ }
+ if (slowing_down_ack_) {
+ // We are slowing down acknowledgment by acknowledging every other frame.
+ if (acked_last_frame_) {
+ acked_last_frame_ = false;
+ } else {
+ acked_last_frame_ = true;
+ last_acked_frame_id_++;
+ // Note: frame skipping and slowdown ACK is not supported at the same
+ // time; and it's not needed since we can skip frames to catch up.
+ }
+ } else {
+ uint8 frame_id = frame_id_map_->LastContinuousFrame();
+
+ // Is it a new frame?
+ if (last_acked_frame_id_ == frame_id) return false;
+
+ last_acked_frame_id_ = frame_id;
+ acked_last_frame_ = true;
+ }
+ cast_msg_.ack_frame_id_ = last_acked_frame_id_;
+ cast_msg_.missing_frames_and_packets_.clear();
+ last_update_time_ = clock_->NowTicks();
+ return true;
+}
+
+// Computes when the next periodic cast message is due (last update plus
+// kCastMessageUpdateIntervalMs).  Returns false when nothing has been
+// received yet.
+// NOTE(review): the guard uses && — if |last_update_time_| is still null
+// while the frame id map is non-empty, |*time_to_send| is computed from a
+// null TimeTicks.  Confirm whether || was intended here.
+bool CastMessageBuilder::TimeToSendNextCastMessage(
+ base::TimeTicks* time_to_send) {
+ // We haven't received any packets.
+ if (last_update_time_.is_null() && frame_id_map_->Empty()) return false;
+
+ *time_to_send = last_update_time_ +
+ base::TimeDelta::FromMilliseconds(kCastMessageUpdateIntervalMs);
+ return true;
+}
+
+// Periodic entry point: rebuilds the ACK/NACK message and, if the update
+// interval has elapsed, sends it to the feedback sink.
+void CastMessageBuilder::UpdateCastMessage() {
+ RtcpCastMessage message(media_ssrc_);
+ if (!UpdateCastMessageInternal(&message)) return;
+
+ // Send cast message.
+ cast_feedback_->CastFeedback(message);
+}
+
+// Drops all ACK/NACK state and goes back to waiting for a key frame; the
+// ACK id is rewound to kStartFrameId.
+void CastMessageBuilder::Reset() {
+ waiting_for_key_frame_ = true;
+ cast_msg_.ack_frame_id_ = kStartFrameId;
+ cast_msg_.missing_frames_and_packets_.clear();
+ time_last_nacked_map_.clear();
+}
+
+// Fills |message| with the current ACK id and NACK list, but only when at
+// least kCastMessageUpdateIntervalMs have passed since the last update.
+// The first call after packets start arriving merely arms
+// |last_update_time_|.  Returns true when |message| was filled and should
+// be sent.
+bool CastMessageBuilder::UpdateCastMessageInternal(RtcpCastMessage* message) {
+ if (last_update_time_.is_null()) {
+ if (!frame_id_map_->Empty()) {
+ // We have received packets.
+ last_update_time_ = clock_->NowTicks();
+ }
+ return false;
+ }
+ // Is it time to update the cast message?
+ base::TimeTicks now = clock_->NowTicks();
+ if (now - last_update_time_ <
+ base::TimeDelta::FromMilliseconds(kCastMessageUpdateIntervalMs)) {
+ return false;
+ }
+ last_update_time_ = now;
+
+ UpdateAckMessage();  // Needed to cover when a frame is skipped.
+ BuildPacketList();
+ *message = cast_msg_;
+ return true;
+}
+
+// Rebuilds the NACK portion of |cast_msg_| by walking every frame id from
+// the one after the current ACK up to the newest frame received.  A frame
+// is re-NACKed at most once per kNackRepeatIntervalMs.  A frame for which
+// no packet has arrived at all is reported with the kCompleteFrameLost
+// sentinel instead of an explicit packet list.
+void CastMessageBuilder::BuildPacketList() {
+ base::TimeTicks now = clock_->NowTicks();
+
+ // Clear message NACK list.
+ cast_msg_.missing_frames_and_packets_.clear();
+
+ // Are we missing packets?
+ if (frame_id_map_->Empty()) return;
+
+ uint8 newest_frame_id = frame_id_map_->NewestFrameId();
+ uint8 next_expected_frame_id =
+ static_cast<uint8>(cast_msg_.ack_frame_id_ + 1);
+
+ // Iterate over all frames.
+ for (; !IsNewerFrameId(next_expected_frame_id, newest_frame_id);
+ ++next_expected_frame_id) {
+ TimeLastNackMap::iterator it =
+ time_last_nacked_map_.find(next_expected_frame_id);
+ if (it != time_last_nacked_map_.end()) {
+ // We have sent a NACK in this frame before, make sure enough time have
+ // passed.
+ if (now - it->second <
+ base::TimeDelta::FromMilliseconds(kNackRepeatIntervalMs)) {
+ continue;
+ }
+ }
+
+ PacketIdSet missing;
+ if (frame_id_map_->FrameExists(next_expected_frame_id)) {
+ bool last_frame = (newest_frame_id == next_expected_frame_id);
+ frame_id_map_->GetMissingPackets(next_expected_frame_id, last_frame,
+ &missing);
+ if (!missing.empty()) {
+ time_last_nacked_map_[next_expected_frame_id] = now;
+ cast_msg_.missing_frames_and_packets_.insert(
+ std::make_pair(next_expected_frame_id, missing));
+ }
+ } else {
+ // No packet of this frame has been seen: NACK the whole frame.
+ time_last_nacked_map_[next_expected_frame_id] = now;
+ missing.insert(kCompleteFrameLost);
+ cast_msg_.missing_frames_and_packets_[next_expected_frame_id] = missing;
+ }
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/cast_message_builder.h b/chromium/media/cast/framer/cast_message_builder.h
new file mode 100644
index 00000000000..b941178b633
--- /dev/null
+++ b/chromium/media/cast/framer/cast_message_builder.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Handles NACK list and manages ACK.
+
+#ifndef MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
+#define MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
+
+#include <map>
+
+#include "media/cast/framer/frame_id_map.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpPayloadFeedback;
+
+// Maps a frame id to the time the frame was last NACKed.
+typedef std::map<uint8, base::TimeTicks> TimeLastNackMap;
+
+// Builds the periodic RTCP cast feedback message (ACK of the newest
+// continuous frame plus a NACK list of missing frames/packets) and pushes
+// it to a RtpPayloadFeedback sink.
+class CastMessageBuilder {
+ public:
+ // |incoming_payload_feedback| and |frame_id_map| are not owned and must
+ // outlive this object; see the .cc constructor comment for the meaning
+ // of the remaining arguments.
+ CastMessageBuilder(RtpPayloadFeedback* incoming_payload_feedback,
+ FrameIdMap* frame_id_map,
+ uint32 media_ssrc,
+ bool decoder_faster_than_max_frame_rate,
+ int max_unacked_frames);
+ ~CastMessageBuilder();
+
+ // Called when the last packet of |frame_id| has been received.
+ void CompleteFrameReceived(uint8 frame_id, bool is_key_frame);
+ // Writes the next scheduled message time to |time_to_send|; returns
+ // false when nothing has been received yet.
+ bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
+ // Periodic tick; sends a feedback message when the interval elapsed.
+ void UpdateCastMessage();
+ // Drops all state and waits for a new key frame.
+ void Reset();
+
+ // Test hook; |clock| is not owned and must outlive this object.
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ private:
+ bool UpdateAckMessage();
+ void BuildPacketList();
+ bool UpdateCastMessageInternal(RtcpCastMessage* message);
+
+ RtpPayloadFeedback* const cast_feedback_;
+
+ // CastMessageBuilder has only const access to the frame id mapper.
+ const FrameIdMap* const frame_id_map_;
+ const uint32 media_ssrc_;
+ const bool decoder_faster_than_max_frame_rate_;
+ const int max_unacked_frames_;
+
+ RtcpCastMessage cast_msg_;  // The message under construction / last sent.
+ base::TimeTicks last_update_time_;
+ bool waiting_for_key_frame_;
+
+ TimeLastNackMap time_last_nacked_map_;
+
+ // Slow-down-ACK state: when active, only every other frame is acked.
+ bool slowing_down_ack_;
+ bool acked_last_frame_;
+ uint8 last_acked_frame_id_;
+
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;  // Not owned when set via set_clock().
+
+ DISALLOW_COPY_AND_ASSIGN(CastMessageBuilder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
diff --git a/chromium/media/cast/framer/cast_message_builder_unittest.cc b/chromium/media/cast/framer/cast_message_builder_unittest.cc
new file mode 100644
index 00000000000..f9bb0668d82
--- /dev/null
+++ b/chromium/media/cast/framer/cast_message_builder_unittest.cc
@@ -0,0 +1,503 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/framer/cast_message_builder.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kSsrc = 0x1234;
+static const uint32 kShortTimeIncrementMs = 10;
+static const uint32 kLongTimeIncrementMs = 40;
+static const int64 kStartMillisecond = 123456789;
+
+typedef std::map<uint8, int> MissingPacketsMap;
+
+// Test double for RtpPayloadFeedback: records the last ACKed frame id and
+// the per-frame missing-packet counts from each CastFeedback() call.
+class NackFeedbackVerification : public RtpPayloadFeedback {
+ public:
+ NackFeedbackVerification()
+ : triggered_(false),
+ missing_packets_(),
+ last_frame_acked_(0) {}
+
+
+ virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
+ EXPECT_EQ(kSsrc, cast_feedback.media_ssrc_);
+
+ last_frame_acked_ = cast_feedback.ack_frame_id_;
+
+ MissingFramesAndPacketsMap::const_iterator frame_it =
+ cast_feedback.missing_frames_and_packets_.begin();
+
+ // Keep track of the number of missing packets per frame.
+ missing_packets_.clear();
+ while (frame_it != cast_feedback.missing_frames_and_packets_.end()) {
+ missing_packets_.insert(
+ std::make_pair(frame_it->first, frame_it->second.size()));
+ ++frame_it;
+ }
+ triggered_ = true;
+ }
+
+ // Number of packets reported missing for |frame_id| in the last message
+ // (0 when the frame was not on the NACK list).
+ int num_missing_packets(uint8 frame_id) {
+ MissingPacketsMap::iterator it;
+ it = missing_packets_.find(frame_id);
+ if (it == missing_packets_.end()) return 0;
+
+ return it->second;
+ }
+
+ // Holds value for one call (reading it resets the flag).
+ bool triggered() {
+ bool ret_val = triggered_;
+ triggered_ = false;
+ return ret_val;
+ }
+
+ uint8 last_frame_acked() { return last_frame_acked_; }
+
+ private:
+ bool triggered_;
+ MissingPacketsMap missing_packets_;  // Missing packets per frame.
+ uint8 last_frame_acked_;
+};
+
+// Fixture: a CastMessageBuilder wired to the NackFeedbackVerification
+// double and a real FrameIdMap, driven by a SimpleTestTickClock.  The
+// Set*() helpers mutate a single RtpCastHeader that InsertPacket() feeds
+// into the frame id map and the builder.
+class CastMessageBuilderTest : public ::testing::Test {
+ protected:
+ CastMessageBuilderTest()
+ : cast_msg_builder_(new CastMessageBuilder(&feedback_,
+ &frame_id_map_,
+ kSsrc,
+ true,
+ 0)) {
+ rtp_header_.webrtc.header.ssrc = kSsrc;
+ rtp_header_.is_key_frame = false;
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ cast_msg_builder_->set_clock(&testing_clock_);
+ }
+
+ ~CastMessageBuilderTest() {}
+
+ void SetFrameId(uint8 frame_id) {
+ rtp_header_.frame_id = frame_id;
+ }
+
+ void SetPacketId(uint16 packet_id) {
+ rtp_header_.packet_id = packet_id;
+ }
+
+ void SetMaxPacketId(uint16 max_packet_id) {
+ rtp_header_.max_packet_id = max_packet_id;
+ }
+
+ void SetKeyFrame(bool is_key) {
+ rtp_header_.is_key_frame = is_key;
+ }
+
+ void SetReferenceFrameId(uint8 reference_frame_id) {
+ rtp_header_.is_reference = true;
+ rtp_header_.reference_frame_id = reference_frame_id;
+ }
+
+ // Feeds the current header into the frame id map, notifies the builder
+ // when the frame completed, and runs a periodic update tick.
+ void InsertPacket() {
+ bool complete = false;
+ frame_id_map_.InsertPacket(rtp_header_, &complete);
+ if (complete) {
+ cast_msg_builder_->CompleteFrameReceived(rtp_header_.frame_id,
+ rtp_header_.is_key_frame);
+ }
+ cast_msg_builder_->UpdateCastMessage();
+ }
+
+ // Replaces the builder with one in slow-down-ACK mode.
+ // NOTE(review): the replacement builder is never handed |testing_clock_|,
+ // so it keeps its internal DefaultTickClock — confirm that tests relying
+ // on simulated time after calling this are still deterministic.
+ void SetDecoderSlowerThanMaxFrameRate(int max_unacked_frames) {
+ cast_msg_builder_.reset(new CastMessageBuilder(&feedback_,
+ &frame_id_map_,
+ kSsrc,
+ false,
+ max_unacked_frames));
+ }
+
+ NackFeedbackVerification feedback_;
+ scoped_ptr<CastMessageBuilder> cast_msg_builder_;
+ RtpCastHeader rtp_header_;
+ FrameIdMap frame_id_map_;
+ base::SimpleTestTickClock testing_clock_;
+};
+
+// No ACK is emitted until the first complete key frame arrives.
+TEST_F(CastMessageBuilderTest, StartWithAKeyFrame) {
+ SetFrameId(3);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ InsertPacket();
+ // Should not trigger ack.
+ EXPECT_FALSE(feedback_.triggered());
+ SetFrameId(5);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ frame_id_map_.RemoveOldFrames(5);  // Simulate 5 being pulled for rendering.
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ cast_msg_builder_->UpdateCastMessage();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(5, feedback_.last_frame_acked());
+}
+
+// Missing packets inside a single frame end up on the NACK list once the
+// update interval has elapsed.
+TEST_F(CastMessageBuilderTest, OneFrameNackList) {
+ SetFrameId(0);
+ SetPacketId(4);
+ SetMaxPacketId(10);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ EXPECT_FALSE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetPacketId(5);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(4, feedback_.num_missing_packets(0));
+}
+
+// Exercises a fully absent frame between two partially-received ones.
+TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
+ // TODO(mikhal): Add indication.
+ SetFrameId(0);
+ SetPacketId(2);
+ SetMaxPacketId(5);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(2);
+ SetPacketId(2);
+ SetMaxPacketId(5);
+ InsertPacket();
+}
+
+// A late key frame fast-forwards the ACK past earlier delta frames.
+TEST_F(CastMessageBuilderTest, FastForwardAck) {
+ SetFrameId(1);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ InsertPacket();
+ EXPECT_FALSE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(2);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(255, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(2, feedback_.last_frame_acked());
+}
+
+// Frames removed from the framer (rendered) must not be NACKed afterwards.
+TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
+ SetFrameId(1);
+ SetPacketId(0);
+ SetMaxPacketId(1);
+ InsertPacket();
+ EXPECT_FALSE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(2);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(3);
+ SetPacketId(0);
+ SetMaxPacketId(5);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(255, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(5);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ frame_id_map_.RemoveOldFrames(5);  // Simulate 5 being pulled for rendering.
+ cast_msg_builder_->UpdateCastMessage();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(5, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ SetFrameId(1);
+ SetPacketId(1);
+ SetMaxPacketId(1);
+ InsertPacket();
+ EXPECT_FALSE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(5, feedback_.last_frame_acked());
+}
+
+// ACK fast-forward must also work across the uint8 frame-id wrap-around.
+TEST_F(CastMessageBuilderTest, WrapFastForward) {
+ SetFrameId(254);
+ SetPacketId(0);
+ SetMaxPacketId(1);
+ SetKeyFrame(true);
+ InsertPacket();
+ EXPECT_FALSE(feedback_.triggered());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(255);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(false);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(253, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(false);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(253, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetFrameId(254);
+ SetPacketId(1);
+ SetMaxPacketId(1);
+ SetKeyFrame(true);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.last_frame_acked());
+}
+
+// NACKs cover only up to the highest packet id seen within a frame.
+TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(20);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetPacketId(5);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(4, feedback_.num_missing_packets(0));
+}
+
+// Once a later frame arrives, the full remaining packet range of the
+// earlier frame is NACKed.
+TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(20);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetPacketId(5);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ SetFrameId(1);
+ SetMaxPacketId(2);
+ SetPacketId(0);
+ SetKeyFrame(false);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(21 - 2, feedback_.num_missing_packets(0));
+}
+
+// A new key frame clears the pending NACKs of the superseded frame.
+TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(20);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ SetPacketId(5);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ SetFrameId(1);
+ SetMaxPacketId(0);
+ SetPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.num_missing_packets(0));
+}
+
+// Reset() drops the NACK state entirely.
+TEST_F(CastMessageBuilderTest, Reset) {
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ cast_msg_builder_->Reset();
+ frame_id_map_.Clear();
+ // Should reset nack list state and request a key frame.
+ cast_msg_builder_->UpdateCastMessage();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.num_missing_packets(0));
+}
+
+// After Reset(), a delta frame must not produce feedback.
+// NOTE(review): InsertPacket() is never called after Reset(), so the final
+// EXPECT_FALSE is trivially satisfied — looks like an InsertPacket() call
+// was omitted; confirm the intended coverage.
+TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.num_missing_packets(0));
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ cast_msg_builder_->Reset();
+ SetFrameId(1);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ EXPECT_FALSE(feedback_.triggered());
+}
+
+// A delta frame referencing an older frame (RPS) can be acked once the
+// skipped-over frames are removed from the framer.
+TEST_F(CastMessageBuilderTest, BasicRps) {
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.last_frame_acked());
+ SetFrameId(3);
+ SetKeyFrame(false);
+ SetReferenceFrameId(0);
+ InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.last_frame_acked());
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
+ frame_id_map_.RemoveOldFrames(3);  // Simulate 3 being pulled for rendering.
+ cast_msg_builder_->UpdateCastMessage();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(3, feedback_.last_frame_acked());
+}
+
+// Once the ACK has skipped ahead via RPS, completing an older frame must
+// not move the ACK backwards.
+TEST_F(CastMessageBuilderTest, InOrderRps) {
+ // Create a pattern - skip to rps, and don't look back.
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(0, feedback_.last_frame_acked());
+ SetFrameId(1);
+ SetPacketId(0);
+ SetMaxPacketId(1);
+ SetKeyFrame(false);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ EXPECT_FALSE(feedback_.triggered());
+ SetFrameId(3);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(false);
+ SetReferenceFrameId(0);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ frame_id_map_.RemoveOldFrames(3);  // Simulate 3 being pulled for rendering.
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ cast_msg_builder_->UpdateCastMessage();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(3, feedback_.last_frame_acked());
+ // Make an old frame complete - should not trigger an ack.
+ SetFrameId(1);
+ SetPacketId(1);
+ SetMaxPacketId(1);
+ SetKeyFrame(false);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ EXPECT_FALSE(feedback_.triggered());
+ EXPECT_EQ(3, feedback_.last_frame_acked());
+}
+
+// With a slow decoder, the builder enters slow-down mode and acks only
+// every other frame, leaving it once the backlog is rendered.
+TEST_F(CastMessageBuilderTest, SlowDownAck) {
+ SetDecoderSlowerThanMaxFrameRate(3);
+ SetFrameId(0);
+ SetPacketId(0);
+ SetMaxPacketId(0);
+ SetKeyFrame(true);
+ InsertPacket();
+
+ int frame_id;
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ SetKeyFrame(false);
+ for (frame_id = 1; frame_id < 3; ++frame_id) {
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(frame_id - 1, feedback_.last_frame_acked());
+ SetFrameId(frame_id);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ }
+ // We should now have entered the slowdown ACK state.
+ uint8_t expected_frame_id = 1;
+ for (; frame_id < 10; ++frame_id) {
+ if (frame_id % 2) ++expected_frame_id;
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
+ SetFrameId(frame_id);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ }
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
+
+ // Simulate frame_id being pulled for rendering.
+ frame_id_map_.RemoveOldFrames(frame_id);
+ // We should now leave the slowdown ACK state.
+ ++frame_id;
+ SetFrameId(frame_id);
+ InsertPacket();
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(frame_id, feedback_.last_frame_acked());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/frame_buffer.cc b/chromium/media/cast/framer/frame_buffer.cc
new file mode 100644
index 00000000000..ed7e11f0ce0
--- /dev/null
+++ b/chromium/media/cast/framer/frame_buffer.cc
@@ -0,0 +1,103 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/framer/frame_buffer.h"
+
+namespace media {
+namespace cast {
+
+FrameBuffer::FrameBuffer()
+ : frame_id_(0),
+ max_packet_id_(0),
+ num_packets_received_(0),
+ is_key_frame_(false),
+ total_data_size_(0),
+ last_referenced_frame_id_(0),
+ packets_() {}
+
+FrameBuffer::~FrameBuffer() {}
+
+void FrameBuffer::InsertPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header) {
+ // Is this the first packet in the frame?
+ if (packets_.empty()) {
+ frame_id_ = rtp_header.frame_id;
+ max_packet_id_ = rtp_header.max_packet_id;
+ is_key_frame_ = rtp_header.is_key_frame;
+ if (rtp_header.is_reference) {
+ last_referenced_frame_id_ = rtp_header.reference_frame_id;
+ } else {
+ last_referenced_frame_id_ = static_cast<uint8>(rtp_header.frame_id - 1);
+ }
+
+ rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
+ }
+ // Is this the correct frame?
+ if (rtp_header.frame_id != frame_id_) return;
+
+ // Insert every packet only once.
+ if (packets_.find(rtp_header.packet_id) != packets_.end()) return;
+
+ std::vector<uint8> data;
+ std::pair<PacketMap::iterator, bool> retval =
+ packets_.insert(make_pair(rtp_header.packet_id, data));
+
+ // Insert the packet.
+ retval.first->second.resize(payload_size);
+ std::copy(payload_data, payload_data + payload_size,
+ retval.first->second.begin());
+
+ ++num_packets_received_;
+ total_data_size_ += payload_size;
+}
+
+bool FrameBuffer::Complete() const {
+ return num_packets_received_ - 1 == max_packet_id_;
+}
+
+bool FrameBuffer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
+ uint32* rtp_timestamp) const {
+ if (!Complete()) return false;
+
+ *rtp_timestamp = rtp_timestamp_;
+
+ // Frame is complete -> construct.
+ audio_frame->frame_id = frame_id_;
+
+ // Build the data vector.
+ audio_frame->data.clear();
+ audio_frame->data.reserve(total_data_size_);
+ PacketMap::const_iterator it;
+ for (it = packets_.begin(); it != packets_.end(); ++it) {
+ audio_frame->data.insert(audio_frame->data.end(),
+ it->second.begin(), it->second.end());
+ }
+ return true;
+}
+
+bool FrameBuffer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
+ uint32* rtp_timestamp) const {
+ if (!Complete()) return false;
+
+ *rtp_timestamp = rtp_timestamp_;
+
+ // Frame is complete -> construct.
+ video_frame->key_frame = is_key_frame_;
+ video_frame->frame_id = frame_id_;
+ video_frame->last_referenced_frame_id = last_referenced_frame_id_;
+
+ // Build the data vector.
+ video_frame->data.clear();
+ video_frame->data.reserve(total_data_size_);
+ PacketMap::const_iterator it;
+ for (it = packets_.begin(); it != packets_.end(); ++it) {
+ video_frame->data.insert(video_frame->data.end(),
+ it->second.begin(), it->second.end());
+ }
+ return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/frame_buffer.h b/chromium/media/cast/framer/frame_buffer.h
new file mode 100644
index 00000000000..d2b52cb409e
--- /dev/null
+++ b/chromium/media/cast/framer/frame_buffer.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_FRAMER_FRAME_BUFFER
+#define MEDIA_CAST_FRAMER_FRAME_BUFFER
+
+#include <map>
+#include <vector>
+
+#include "media/cast/cast_config.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+typedef std::map<uint16, std::vector<uint8> > PacketMap;
+
+class FrameBuffer {
+ public:
+ FrameBuffer();
+ ~FrameBuffer();
+ void InsertPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header);
+ bool Complete() const;
+
+ bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
+ uint32* rtp_timestamp) const;
+
+ bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
+ uint32* rtp_timestamp) const;
+
+ bool is_key_frame() const { return is_key_frame_; }
+ uint8 frame_id() const { return frame_id_; }
+ uint8 last_referenced_frame_id() const { return last_referenced_frame_id_; }
+
+ private:
+ uint8 frame_id_;
+ uint16 max_packet_id_;
+ uint16 num_packets_received_;
+ bool is_key_frame_;
+ int total_data_size_;
+ uint8 last_referenced_frame_id_;
+ uint32 rtp_timestamp_;
+ PacketMap packets_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameBuffer);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_FRAMER_FRAME_BUFFER
diff --git a/chromium/media/cast/framer/frame_buffer_unittest.cc b/chromium/media/cast/framer/frame_buffer_unittest.cc
new file mode 100644
index 00000000000..26998f5fd7e
--- /dev/null
+++ b/chromium/media/cast/framer/frame_buffer_unittest.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/framer/frame_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+class FrameBufferTest : public ::testing::Test {
+ protected:
+ FrameBufferTest() {}
+
+ ~FrameBufferTest() {}
+
+ void SetUp() {
+ payload_.assign(kIpPacketSize, 0);
+
+ // Build a default one packet frame - populate webrtc header.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 0;
+ rtp_header_.packet_id = 0;
+ rtp_header_.max_packet_id = 0;
+ rtp_header_.is_reference = false;
+ rtp_header_.reference_frame_id = 0;
+ }
+
+ FrameBuffer buffer_;
+ std::vector<uint8> payload_;
+ RtpCastHeader rtp_header_;
+};
+
+TEST_F(FrameBufferTest, EmptyBuffer) {
+ EXPECT_FALSE(buffer_.Complete());
+ EXPECT_FALSE(buffer_.is_key_frame());
+ EncodedVideoFrame frame;
+ uint32 rtp_timestamp;
+ EXPECT_FALSE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+}
+
+TEST_F(FrameBufferTest, DefaultOnePacketFrame) {
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(buffer_.Complete());
+ EXPECT_FALSE(buffer_.is_key_frame());
+ EncodedVideoFrame frame;
+ uint32 rtp_timestamp;
+ EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+ EXPECT_EQ(payload_.size(), frame.data.size());
+}
+
+TEST_F(FrameBufferTest, MultiplePacketFrame) {
+ rtp_header_.is_key_frame = true;
+ rtp_header_.max_packet_id = 2;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ EXPECT_TRUE(buffer_.Complete());
+ EXPECT_TRUE(buffer_.is_key_frame());
+ EncodedVideoFrame frame;
+ uint32 rtp_timestamp;
+ EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+ EXPECT_EQ(3 * payload_.size(), frame.data.size());
+}
+
+TEST_F(FrameBufferTest, InCompleteFrame) {
+ rtp_header_.max_packet_id = 4;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ // Increment again - skip packet #2.
+ ++rtp_header_.packet_id;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ ++rtp_header_.packet_id;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(buffer_.Complete());
+ // Insert missing packet.
+ rtp_header_.packet_id = 2;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(buffer_.Complete());
+}
+
+} // namespace media
+} // namespace cast
diff --git a/chromium/media/cast/framer/frame_id_map.cc b/chromium/media/cast/framer/frame_id_map.cc
new file mode 100644
index 00000000000..cf866845227
--- /dev/null
+++ b/chromium/media/cast/framer/frame_id_map.cc
@@ -0,0 +1,252 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/framer/frame_id_map.h"
+
+#include "base/logging.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+FrameInfo::FrameInfo(uint8 frame_id,
+ uint8 referenced_frame_id,
+ uint16 max_packet_id,
+ bool key_frame)
+ : is_key_frame_(key_frame),
+ frame_id_(frame_id),
+ referenced_frame_id_(referenced_frame_id),
+ max_received_packet_id_(0) {
+ if (max_packet_id > 0) {
+ // Create the set with all packets missing.
+ for (uint16 i = 0; i <= max_packet_id; i++) {
+ missing_packets_.insert(i);
+ }
+ }
+}
+
+FrameInfo::~FrameInfo() {}
+
+bool FrameInfo::InsertPacket(uint16 packet_id) {
+ // Update the last received packet id.
+ if (IsNewerPacketId(packet_id, max_received_packet_id_)) {
+ max_received_packet_id_ = packet_id;
+ }
+ missing_packets_.erase(packet_id);
+ return missing_packets_.empty();
+}
+
+bool FrameInfo::Complete() const {
+ return missing_packets_.empty();
+}
+
+void FrameInfo::GetMissingPackets(bool newest_frame,
+ PacketIdSet* missing_packets) const {
+ if (newest_frame) {
+ // Missing packets capped by max_received_packet_id_.
+ PacketIdSet::const_iterator it_after_last_received =
+ missing_packets_.lower_bound(max_received_packet_id_);
+ missing_packets->insert(missing_packets_.begin(), it_after_last_received);
+ } else {
+ missing_packets->insert(missing_packets_.begin(), missing_packets_.end());
+ }
+}
+
+
+FrameIdMap::FrameIdMap()
+ : waiting_for_key_(true),
+ last_released_frame_(kStartFrameId),
+ newest_frame_id_(kStartFrameId) {
+}
+
+FrameIdMap::~FrameIdMap() {}
+
+bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
+ uint8 frame_id = rtp_header.frame_id;
+ uint8 reference_frame_id;
+ if (rtp_header.is_reference) {
+ reference_frame_id = rtp_header.reference_frame_id;
+ } else {
+ reference_frame_id = static_cast<uint8>(frame_id - 1);
+ }
+
+ if (rtp_header.is_key_frame && waiting_for_key_) {
+ last_released_frame_ = static_cast<uint8>(frame_id - 1);
+ waiting_for_key_ = false;
+ }
+
+ if (IsOlderFrameId(frame_id, last_released_frame_) && !waiting_for_key_) {
+ return false;
+ }
+
+ // Update the last received frame id.
+ if (IsNewerFrameId(frame_id, newest_frame_id_)) {
+ newest_frame_id_ = frame_id;
+ }
+
+ // Does this packet belong to a new frame?
+ FrameMap::iterator it = frame_map_.find(frame_id);
+ if (it == frame_map_.end()) {
+ // New frame.
+ linked_ptr<FrameInfo> frame_info(new FrameInfo(frame_id,
+ reference_frame_id,
+ rtp_header.max_packet_id,
+ rtp_header.is_key_frame));
+ std::pair<FrameMap::iterator, bool> retval =
+ frame_map_.insert(std::make_pair(frame_id, frame_info));
+
+ *complete = retval.first->second->InsertPacket(rtp_header.packet_id);
+ } else {
+ // Insert packet to existing frame.
+ *complete = it->second->InsertPacket(rtp_header.packet_id);
+ }
+ return true;
+}
+
+void FrameIdMap::RemoveOldFrames(uint8 frame_id) {
+ FrameMap::iterator it = frame_map_.begin();
+
+ while (it != frame_map_.end()) {
+ if (IsNewerFrameId(it->first, frame_id)) {
+ ++it;
+ } else {
+ // Older or equal; erase.
+ frame_map_.erase(it++);
+ }
+ }
+ last_released_frame_ = frame_id;
+}
+
+void FrameIdMap::Clear() {
+ frame_map_.clear();
+ waiting_for_key_ = true;
+ last_released_frame_ = kStartFrameId;
+ newest_frame_id_ = kStartFrameId;
+}
+
+uint8 FrameIdMap::NewestFrameId() const {
+ return newest_frame_id_;
+}
+
+bool FrameIdMap::NextContinuousFrame(uint8* frame_id) const {
+ FrameMap::const_iterator it;
+
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete() && ContinuousFrame(it->second.get())) {
+ *frame_id = it->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+uint8 FrameIdMap::LastContinuousFrame() const {
+ uint8 last_continuous_frame_id = last_released_frame_;
+ uint8 next_expected_frame = last_released_frame_;
+
+ FrameMap::const_iterator it;
+
+ do {
+ next_expected_frame++;
+ it = frame_map_.find(next_expected_frame);
+ if (it == frame_map_.end()) break;
+ if (!it->second->Complete()) break;
+
+ // We found the next continuous frame.
+ last_continuous_frame_id = it->first;
+ } while (next_expected_frame != newest_frame_id_);
+ return last_continuous_frame_id;
+}
+
+bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint8* frame_id) const {
+ // First check if we have continuous frames.
+ if (NextContinuousFrame(frame_id)) return true;
+
+ // Find the oldest frame.
+ FrameMap::const_iterator it_best_match = frame_map_.end();
+ FrameMap::const_iterator it;
+
+ // Find first complete frame.
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete()) {
+ it_best_match = it;
+ break;
+ }
+ }
+ if (it_best_match == frame_map_.end()) return false; // No complete frame.
+
+ ++it;
+ for (; it != frame_map_.end(); ++it) {
+ if (it->second->Complete() &&
+ IsOlderFrameId(it->first, it_best_match->first)) {
+ it_best_match = it;
+ }
+ }
+ *frame_id = it_best_match->first;
+ return true;
+}
+
+bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const {
+ // Find the oldest decodable frame.
+ FrameMap::const_iterator it_best_match = frame_map_.end();
+ FrameMap::const_iterator it;
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete() && DecodableVideoFrame(it->second.get())) {
+ it_best_match = it;
+ }
+ }
+ if (it_best_match == frame_map_.end()) return false;
+
+ *frame_id = it_best_match->first;
+ return true;
+}
+
+bool FrameIdMap::Empty() const {
+ return frame_map_.empty();
+}
+
+int FrameIdMap::NumberOfCompleteFrames() const {
+ int count = 0;
+ FrameMap::const_iterator it;
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete()) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+bool FrameIdMap::FrameExists(uint8 frame_id) const {
+ return frame_map_.end() != frame_map_.find(frame_id);
+}
+
+void FrameIdMap::GetMissingPackets(uint8 frame_id,
+ bool last_frame,
+ PacketIdSet* missing_packets) const {
+ FrameMap::const_iterator it = frame_map_.find(frame_id);
+ if (it == frame_map_.end()) return;
+
+ it->second->GetMissingPackets(last_frame, missing_packets);
+}
+
+bool FrameIdMap::ContinuousFrame(FrameInfo* frame) const {
+ DCHECK(frame);
+ if (waiting_for_key_ && !frame->is_key_frame()) return false;
+ return static_cast<uint8>(last_released_frame_ + 1) == frame->frame_id();
+}
+
+bool FrameIdMap::DecodableVideoFrame(FrameInfo* frame) const {
+ if (frame->is_key_frame()) return true;
+ if (waiting_for_key_ && !frame->is_key_frame()) return false;
+
+ // Current frame is not necessarily referencing the last frame.
+ // Do we have the reference frame?
+ if (IsOlderFrameId(frame->referenced_frame_id(), last_released_frame_)) {
+ return true;
+ }
+ return frame->referenced_frame_id() == last_released_frame_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/frame_id_map.h b/chromium/media/cast/framer/frame_id_map.h
new file mode 100644
index 00000000000..6bf72a0d692
--- /dev/null
+++ b/chromium/media/cast/framer/frame_id_map.h
@@ -0,0 +1,93 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_FRAMER_FRAME_ID_MAP_H_
+#define MEDIA_CAST_FRAMER_FRAME_ID_MAP_H_
+
+#include <map>
+#include <set>
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class FrameInfo {
+ public:
+ FrameInfo(uint8 frame_id,
+ uint8 referenced_frame_id,
+ uint16 max_packet_id,
+ bool key_frame);
+ ~FrameInfo();
+
+ // Returns true if frame is complete after the insert.
+ bool InsertPacket(uint16 packet_id);
+ bool Complete() const;
+ void GetMissingPackets(bool newest_frame,
+ PacketIdSet* missing_packets) const;
+
+ bool is_key_frame() const { return is_key_frame_; }
+ uint8 frame_id() const { return frame_id_; }
+ uint8 referenced_frame_id() const { return referenced_frame_id_; }
+
+ private:
+ const bool is_key_frame_;
+ const uint8 frame_id_;
+ const uint8 referenced_frame_id_;
+
+ uint16 max_received_packet_id_;
+ PacketIdSet missing_packets_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameInfo);
+};
+
+typedef std::map<uint8, linked_ptr<FrameInfo> > FrameMap;
+
+class FrameIdMap {
+ public:
+ FrameIdMap();
+ ~FrameIdMap();
+
+ // Returns false if not a valid (old) packet, otherwise returns true.
+ bool InsertPacket(const RtpCastHeader& rtp_header, bool* complete);
+
+ bool Empty() const;
+ bool FrameExists(uint8 frame_id) const;
+ uint8 NewestFrameId() const;
+
+ void RemoveOldFrames(uint8 frame_id);
+ void Clear();
+
+ // Identifies the next frame to be released (rendered).
+ bool NextContinuousFrame(uint8* frame_id) const;
+ uint8 LastContinuousFrame() const;
+
+ bool NextAudioFrameAllowingMissingFrames(uint8* frame_id) const;
+ bool NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const;
+
+ int NumberOfCompleteFrames() const;
+ void GetMissingPackets(uint8 frame_id,
+ bool last_frame,
+ PacketIdSet* missing_packets) const;
+
+ private:
+ bool ContinuousFrame(FrameInfo* frame) const;
+ bool DecodableVideoFrame(FrameInfo* frame) const;
+
+ FrameMap frame_map_;
+ bool waiting_for_key_;
+ uint8 last_released_frame_;
+ uint8 newest_frame_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameIdMap);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_FRAMER_FRAME_ID_MAP_H_
diff --git a/chromium/media/cast/framer/framer.cc b/chromium/media/cast/framer/framer.cc
new file mode 100644
index 00000000000..95048209dcd
--- /dev/null
+++ b/chromium/media/cast/framer/framer.cc
@@ -0,0 +1,146 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/framer/framer.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+typedef FrameList::const_iterator ConstFrameIterator;
+
+Framer::Framer(RtpPayloadFeedback* incoming_payload_feedback,
+ uint32 ssrc,
+ bool decoder_faster_than_max_frame_rate,
+ int max_unacked_frames)
+ : decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
+ clock_(&default_tick_clock_),
+ cast_msg_builder_(new CastMessageBuilder(incoming_payload_feedback,
+ &frame_id_map_, ssrc, decoder_faster_than_max_frame_rate,
+ max_unacked_frames)) {
+ DCHECK(incoming_payload_feedback) << "Invalid argument";
+}
+
+Framer::~Framer() {}
+
+void Framer::InsertPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header) {
+ bool complete = false;
+ if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return;
+
+ // Does this packet belong to a new frame?
+ FrameList::iterator it = frames_.find(rtp_header.frame_id);
+ if (it == frames_.end()) {
+ // New frame.
+ linked_ptr<FrameBuffer> frame_buffer(new FrameBuffer());
+ frame_buffer->InsertPacket(payload_data, payload_size, rtp_header);
+ frames_.insert(std::make_pair(rtp_header.frame_id, frame_buffer));
+ } else {
+ // Insert packet to existing frame buffer.
+ it->second->InsertPacket(payload_data, payload_size, rtp_header);
+ }
+
+ if (complete) {
+ // ACK as soon as possible.
+ cast_msg_builder_->CompleteFrameReceived(rtp_header.frame_id,
+ rtp_header.is_key_frame);
+ }
+}
+
+// This does not release the frame.
+bool Framer::GetEncodedAudioFrame(const base::TimeTicks& timeout,
+ EncodedAudioFrame* audio_frame,
+ uint32* rtp_timestamp,
+ bool* next_frame) {
+ uint8 frame_id;
+ // Find frame id.
+ if (frame_id_map_.NextContinuousFrame(&frame_id)) {
+ // We have our next frame.
+ *next_frame = true;
+ } else {
+ if (WaitForNextFrame(timeout)) return false;
+
+ if (!frame_id_map_.NextAudioFrameAllowingMissingFrames(&frame_id)) {
+ return false;
+ }
+ *next_frame = false;
+ }
+
+ ConstFrameIterator it = frames_.find(frame_id);
+ DCHECK(it != frames_.end());
+ if (it == frames_.end()) return false;
+
+ return it->second->GetEncodedAudioFrame(audio_frame, rtp_timestamp);
+}
+
+// This does not release the frame.
+bool Framer::GetEncodedVideoFrame(const base::TimeTicks& timeout,
+ EncodedVideoFrame* video_frame,
+ uint32* rtp_timestamp,
+ bool* next_frame) {
+ uint8 frame_id;
+ // Find frame id.
+ if (frame_id_map_.NextContinuousFrame(&frame_id)) {
+ // We have our next frame.
+ *next_frame = true;
+ } else {
+ if (WaitForNextFrame(timeout)) return false;
+
+ // Check if we can skip frames when our decoder is too slow.
+ if (!decoder_faster_than_max_frame_rate_) return false;
+
+ if (!frame_id_map_.NextVideoFrameAllowingSkippingFrames(&frame_id)) {
+ return false;
+ }
+ *next_frame = false;
+ }
+
+ ConstFrameIterator it = frames_.find(frame_id);
+ DCHECK(it != frames_.end());
+ if (it == frames_.end()) return false;
+
+ return it->second->GetEncodedVideoFrame(video_frame, rtp_timestamp);
+}
+
+bool Framer::WaitForNextFrame(const base::TimeTicks& timeout) const {
+ base::TimeDelta wait_time = timeout - clock_->NowTicks();
+ if (wait_time.InMilliseconds() > 0)
+ return true;
+
+ return false;
+}
+
+void Framer::Reset() {
+ frame_id_map_.Clear();
+ frames_.clear();
+ cast_msg_builder_->Reset();
+}
+
+void Framer::ReleaseFrame(uint8 frame_id) {
+ frame_id_map_.RemoveOldFrames(frame_id);
+ frames_.erase(frame_id);
+
+ // We have a frame - remove all frames with lower frame id.
+ FrameList::iterator it;
+ for (it = frames_.begin(); it != frames_.end(); ) {
+ if (IsOlderFrameId(it->first, frame_id)) {
+ frames_.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+}
+
+bool Framer::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
+ return cast_msg_builder_->TimeToSendNextCastMessage(time_to_send);
+}
+
+void Framer::SendCastMessage() {
+ cast_msg_builder_->UpdateCastMessage();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/framer/framer.gyp b/chromium/media/cast/framer/framer.gyp
new file mode 100644
index 00000000000..7b124f0c5de
--- /dev/null
+++ b/chromium/media/cast/framer/framer.gyp
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_framer',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc',
+ ],
+ 'sources': [
+ 'cast_message_builder.cc',
+ 'cast_message_builder.h',
+ 'frame_buffer.cc',
+ 'frame_buffer.h',
+ 'frame_id_map.cc',
+ 'frame_id_map.h',
+ 'framer.cc',
+ 'framer.h',
+ ],
+ },
+ ], # targets
+}
diff --git a/chromium/media/cast/framer/framer.h b/chromium/media/cast/framer/framer.h
new file mode 100644
index 00000000000..93d79060607
--- /dev/null
+++ b/chromium/media/cast/framer/framer.h
@@ -0,0 +1,84 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_FRAMER_FRAMER_H_
+#define MEDIA_CAST_FRAMER_FRAMER_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/framer/cast_message_builder.h"
+#include "media/cast/framer/frame_buffer.h"
+#include "media/cast/framer/frame_id_map.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+typedef std::map<uint8, linked_ptr<FrameBuffer> > FrameList;
+
+class Framer {
+ public:
+ Framer(RtpPayloadFeedback* incoming_payload_feedback,
+ uint32 ssrc,
+ bool decoder_faster_than_max_frame_rate,
+ int max_unacked_frames);
+ ~Framer();
+
+ void InsertPacket(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader& rtp_header);
+
+ // Extracts a complete encoded frame - will only return a complete continuous
+ // frame.
+ // Returns false if the frame does not exist or if the frame is not complete
+ // within the given time frame.
+ bool GetEncodedVideoFrame(const base::TimeTicks& timeout,
+ EncodedVideoFrame* video_frame,
+ uint32* rtp_timestamp,
+ bool* next_frame);
+
+ bool GetEncodedAudioFrame(const base::TimeTicks& timeout,
+ EncodedAudioFrame* audio_frame,
+ uint32* rtp_timestamp,
+ bool* next_frame);
+
+ void ReleaseFrame(uint8 frame_id);
+
+ // Reset framer state to original state and flush all pending buffers.
+ void Reset();
+ bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
+ void SendCastMessage();
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ cast_msg_builder_->set_clock(clock);
+ }
+
+ private:
+ // Return true if we should wait.
+ bool WaitForNextFrame(const base::TimeTicks& timeout) const;
+
+ const bool decoder_faster_than_max_frame_rate_;
+ FrameList frames_;
+ FrameIdMap frame_id_map_;
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ scoped_ptr<CastMessageBuilder> cast_msg_builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(Framer);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_FRAMER_FRAMER_H_
diff --git a/chromium/media/cast/framer/framer_unittest.cc b/chromium/media/cast/framer/framer_unittest.cc
new file mode 100644
index 00000000000..6f83706494f
--- /dev/null
+++ b/chromium/media/cast/framer/framer_unittest.cc
@@ -0,0 +1,351 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/framer/framer.h"
+#include "media/cast/rtp_common/mock_rtp_payload_feedback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kFrameTimeMillisecond = 33;
+
+class FramerTest : public ::testing::Test {
+ protected:
+ FramerTest()
+ : mock_rtp_payload_feedback_(),
+ framer_(&mock_rtp_payload_feedback_, 0, true, 0) {
+ framer_.set_clock(&testing_clock_);
+ }
+
+ ~FramerTest() {}
+
+ void SetUp() {
+ // Build a default one packet frame - populate webrtc header.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 0;
+ rtp_header_.packet_id = 0;
+ rtp_header_.max_packet_id = 0;
+ rtp_header_.is_reference = false;
+ rtp_header_.reference_frame_id = 0;
+ payload_.assign(kIpPacketSize, 0);
+
+ EXPECT_CALL(mock_rtp_payload_feedback_,
+ CastFeedback(testing::_)).WillRepeatedly(testing::Return());
+ }
+
+ std::vector<uint8> payload_;
+ RtpCastHeader rtp_header_;
+ MockRtpPayloadFeedback mock_rtp_payload_feedback_;
+ Framer framer_;
+ base::SimpleTestTickClock testing_clock_;
+};
+
+
+TEST_F(FramerTest, EmptyState) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+}
+
+TEST_F(FramerTest, AlwaysStartWithKey) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+ // Insert non key first frame.
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ rtp_header_.frame_id = 1;
+ rtp_header_.is_key_frame = true;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(1, frame.frame_id);
+ EXPECT_TRUE(frame.key_frame);
+ framer_.ReleaseFrame(frame.frame_id);
+}
+
+TEST_F(FramerTest, CompleteFrame) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+ // start with a complete key frame.
+ rtp_header_.is_key_frame = true;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(0, frame.frame_id);
+ EXPECT_TRUE(frame.key_frame);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ // Incomplete delta.
+ ++rtp_header_.frame_id;
+ rtp_header_.is_key_frame = false;
+ rtp_header_.max_packet_id = 2;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+
+ // Complete delta - can't skip, as incomplete sequence.
+ ++rtp_header_.frame_id;
+ rtp_header_.max_packet_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+}
+
+TEST_F(FramerTest, ContinuousSequence) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+ // start with a complete key frame.
+ rtp_header_.is_key_frame = true;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(0, frame.frame_id);
+ EXPECT_TRUE(frame.key_frame);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ // Complete - not continuous.
+ rtp_header_.frame_id = 2;
+ rtp_header_.is_key_frame = false;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+}
+
+TEST_F(FramerTest, Wrap) {
+ // Insert key frame, frame_id = 255 (will jump to that)
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+ // Start with a complete key frame.
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 255;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(255, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ // Insert wrapped delta frame - should be continuous.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(0, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+}
+
+TEST_F(FramerTest, Reset) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+ // Start with a complete key frame.
+ rtp_header_.is_key_frame = true;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ framer_.Reset();
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+}
+
+TEST_F(FramerTest, RequireKeyAfterReset) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+ framer_.Reset();
+
+ // Start with a complete key frame.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ rtp_header_.frame_id = 1;
+ rtp_header_.is_key_frame = true;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+}
+
+TEST_F(FramerTest, BasicNonLastReferenceId) {
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+ base::TimeTicks timeout;
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ framer_.ReleaseFrame(frame.frame_id);
+
+ rtp_header_.is_key_frame = false;
+ rtp_header_.is_reference = true;
+ rtp_header_.reference_frame_id = 0;
+ rtp_header_.frame_id = 5;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+ timeout += base::TimeDelta::FromMilliseconds(kFrameTimeMillisecond);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kFrameTimeMillisecond));
+
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_FALSE(next_frame);
+}
+
+TEST_F(FramerTest, InOrderReferenceFrameSelection) {
+ // Create pattern: 0, 1, 4, 5.
+ EncodedVideoFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 1;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+ // Insert frame #2 partially.
+ rtp_header_.frame_id = 2;
+ rtp_header_.max_packet_id = 1;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.frame_id = 4;
+ rtp_header_.max_packet_id = 0;
+ rtp_header_.is_reference = true;
+ rtp_header_.reference_frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_EQ(0, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(1, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_FALSE(next_frame);
+ EXPECT_EQ(4, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+  // Insert remaining packet of frame #2 - should not be continuous.
+ rtp_header_.frame_id = 2;
+ rtp_header_.packet_id = 1;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ rtp_header_.is_reference = false;
+ rtp_header_.frame_id = 5;
+ rtp_header_.packet_id = 0;
+ rtp_header_.max_packet_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(5, frame.frame_id);
+}
+
+TEST_F(FramerTest, AudioWrap) {
+ // All audio frames are marked as key frames.
+ EncodedAudioFrame frame;
+ uint32_t rtp_timestamp;
+ base::TimeTicks timeout;
+ bool next_frame = false;
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 254;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(254, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ rtp_header_.frame_id = 255;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+ // Insert wrapped frame - should be continuous.
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(255, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(0, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+}
+
+TEST_F(FramerTest, AudioWrapWithMissingFrame) {
+ // All audio frames are marked as key frames.
+ EncodedAudioFrame frame;
+ uint32_t rtp_timestamp;
+ bool next_frame = false;
+ base::TimeTicks timeout;
+
+  // Insert and get the first frame.
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 253;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(253, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+
+  // Insert the third and fourth frames.
+ rtp_header_.frame_id = 255;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+
+  // Get the third and fourth frames.
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_FALSE(next_frame);
+ EXPECT_EQ(255, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ &next_frame));
+ EXPECT_TRUE(next_frame);
+ EXPECT_EQ(0, frame.frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/pacing/mock_paced_packet_sender.h b/chromium/media/cast/pacing/mock_paced_packet_sender.h
new file mode 100644
index 00000000000..40d3e622027
--- /dev/null
+++ b/chromium/media/cast/pacing/mock_paced_packet_sender.h
@@ -0,0 +1,26 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
+#define MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
+
+#include "media/cast/pacing/paced_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockPacedPacketSender : public PacedPacketSender {
+ public:
+ MOCK_METHOD2(SendPacket,
+ bool(const std::vector<uint8>& packet, int num_of_packets));
+ MOCK_METHOD2(ResendPacket,
+ bool(const std::vector<uint8>& packet, int num_of_packets));
+ MOCK_METHOD1(SendRtcpPacket, bool(const std::vector<uint8>& packet));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/chromium/media/cast/pacing/mock_packet_sender.h b/chromium/media/cast/pacing/mock_packet_sender.h
new file mode 100644
index 00000000000..bad9bac89d1
--- /dev/null
+++ b/chromium/media/cast/pacing/mock_packet_sender.h
@@ -0,0 +1,22 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
+#define MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
+
+#include "media/cast/cast_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockPacketSender : public PacketSender {
+ public:
+ MOCK_METHOD2(SendPacket, bool(const uint8* packet, int length));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
diff --git a/chromium/media/cast/pacing/paced_sender.cc b/chromium/media/cast/pacing/paced_sender.cc
new file mode 100644
index 00000000000..d2935f3e65a
--- /dev/null
+++ b/chromium/media/cast/pacing/paced_sender.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/pacing/paced_sender.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kPacingIntervalMs = 10;
+static const int kPacingMaxBurstsPerFrame = 3;
+
+PacedSender::PacedSender(scoped_refptr<CastThread> cast_thread,
+ PacketSender* transport)
+ : cast_thread_(cast_thread),
+ burst_size_(1),
+ packets_sent_in_burst_(0),
+ transport_(transport),
+ clock_(&default_tick_clock_),
+ weak_factory_(this) {
+ ScheduleNextSend();
+}
+
+PacedSender::~PacedSender() {}
+
+bool PacedSender::SendPacket(const std::vector<uint8>& packet,
+ int num_of_packets_in_frame) {
+ if (!packet_list_.empty()) {
+    // We have a queue; put the new packet last in the list.
+ packet_list_.push_back(packet);
+ UpdateBurstSize(num_of_packets_in_frame);
+ return true;
+ }
+ UpdateBurstSize(num_of_packets_in_frame);
+
+ if (packets_sent_in_burst_ >= burst_size_) {
+ packet_list_.push_back(packet);
+ return true;
+ }
+ ++packets_sent_in_burst_;
+ return transport_->SendPacket(&(packet[0]), packet.size());
+}
+
+bool PacedSender::ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets_to_resend) {
+ if (!packet_list_.empty() || !resend_packet_list_.empty()) {
+    // We have a queue; put the resend packet in the list.
+ resend_packet_list_.push_back(packet);
+ UpdateBurstSize(num_of_packets_to_resend);
+ return true;
+ }
+ UpdateBurstSize(num_of_packets_to_resend);
+
+ if (packets_sent_in_burst_ >= burst_size_) {
+ resend_packet_list_.push_back(packet);
+ return true;
+ }
+ ++packets_sent_in_burst_;
+ return transport_->SendPacket(&(packet[0]), packet.size());
+}
+
+bool PacedSender::SendRtcpPacket(const std::vector<uint8>& packet) {
+ // We pass the RTCP packets straight through.
+ return transport_->SendPacket(&(packet[0]), packet.size());
+}
+
+void PacedSender::ScheduleNextSend() {
+ base::TimeDelta time_to_next = time_last_process_ - clock_->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
+
+ time_to_next = std::max(time_to_next,
+ base::TimeDelta::FromMilliseconds(0));
+
+ cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(&PacedSender::SendNextPacketBurst, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void PacedSender::SendNextPacketBurst() {
+ int packets_to_send = burst_size_;
+ time_last_process_ = clock_->NowTicks();
+ for (int i = 0; i < packets_to_send; ++i) {
+ SendStoredPacket();
+ }
+ ScheduleNextSend();
+}
+
+void PacedSender::SendStoredPacket() {
+ if (packet_list_.empty() && resend_packet_list_.empty()) return;
+
+ if (!resend_packet_list_.empty()) {
+ // Send our re-send packets first.
+ const std::vector<uint8>& packet = resend_packet_list_.front();
+ transport_->SendPacket(&(packet[0]), packet.size());
+ resend_packet_list_.pop_front();
+ } else {
+ const std::vector<uint8>& packet = packet_list_.front();
+ transport_->SendPacket(&(packet[0]), packet.size());
+ packet_list_.pop_front();
+
+ if (packet_list_.empty()) {
+ burst_size_ = 1; // Reset burst size after we sent the last stored packet
+ packets_sent_in_burst_ = 0;
+ }
+ }
+}
+
+void PacedSender::UpdateBurstSize(int packets_to_send) {
+ packets_to_send = std::max(packets_to_send,
+ static_cast<int>(resend_packet_list_.size() + packet_list_.size()));
+
+ packets_to_send += (kPacingMaxBurstsPerFrame - 1); // Round up.
+
+ burst_size_ = std::max(packets_to_send / kPacingMaxBurstsPerFrame,
+ burst_size_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/pacing/paced_sender.gyp b/chromium/media/cast/pacing/paced_sender.gyp
new file mode 100644
index 00000000000..53a1cdb1ef8
--- /dev/null
+++ b/chromium/media/cast/pacing/paced_sender.gyp
@@ -0,0 +1,23 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'paced_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'paced_sender.h',
+ 'paced_sender.cc',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ ],
+ },
+ ], # targets
+}
diff --git a/chromium/media/cast/pacing/paced_sender.h b/chromium/media/cast/pacing/paced_sender.h
new file mode 100644
index 00000000000..9dcd03e8469
--- /dev/null
+++ b/chromium/media/cast/pacing/paced_sender.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_PACING_PACED_SENDER_H_
+#define MEDIA_CAST_PACING_PACED_SENDER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+
+namespace media {
+namespace cast {
+
+class PacedPacketSender {
+ public:
+ // Inform the pacer / sender of the total number of packets.
+ virtual bool SendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) = 0;
+
+ virtual bool ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) = 0;
+
+ virtual bool SendRtcpPacket(const std::vector<uint8>& packet) = 0;
+
+ virtual ~PacedPacketSender() {}
+};
+
+class PacedSender : public PacedPacketSender,
+ public base::NonThreadSafe,
+ public base::SupportsWeakPtr<PacedSender> {
+ public:
+ PacedSender(scoped_refptr<CastThread> cast_thread, PacketSender* transport);
+ virtual ~PacedSender();
+
+ virtual bool SendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) OVERRIDE;
+
+ virtual bool ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) OVERRIDE;
+
+ virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE;
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ protected:
+ // Schedule a delayed task on the main cast thread when it's time to send the
+ // next packet burst.
+ void ScheduleNextSend();
+
+ // Process any pending packets in the queue(s).
+ void SendNextPacketBurst();
+
+ private:
+ void SendStoredPacket();
+ void UpdateBurstSize(int num_of_packets);
+
+ typedef std::list<std::vector<uint8> > PacketList;
+
+ scoped_refptr<CastThread> cast_thread_;
+ int burst_size_;
+ int packets_sent_in_burst_;
+ base::TimeTicks time_last_process_;
+ PacketList packet_list_;
+ PacketList resend_packet_list_;
+ PacketSender* transport_;
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ base::WeakPtrFactory<PacedSender> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(PacedSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_PACING_PACED_SENDER_H_
diff --git a/chromium/media/cast/pacing/paced_sender_unittest.cc b/chromium/media/cast/pacing/paced_sender_unittest.cc
new file mode 100644
index 00000000000..b823b16fa7e
--- /dev/null
+++ b/chromium/media/cast/pacing/paced_sender_unittest.cc
@@ -0,0 +1,265 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/pacing/mock_packet_sender.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+static const uint8 kValue = 123;
+static const size_t kSize1 = 100;
+static const size_t kSize2 = 101;
+static const size_t kSize3 = 102;
+static const size_t kSize4 = 103;
+static const size_t kNackSize = 104;
+static const int64 kStartMillisecond = 123456789;
+
+class PacedSenderTest : public ::testing::Test {
+ protected:
+ PacedSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ virtual ~PacedSenderTest() {}
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ paced_sender_.reset(new PacedSender(cast_thread_, &mock_transport_));
+ paced_sender_->set_clock(&testing_clock_);
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ MockPacketSender mock_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<PacedSender> paced_sender_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(PacedSenderTest, PassThroughRtcp) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1).WillRepeatedly(
+ testing::Return(true));
+
+ std::vector<uint8> packet(kSize1, kValue);
+ int num_of_packets = 1;
+ EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
+ EXPECT_TRUE(paced_sender_->ResendPacket(packet, num_of_packets));
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1).WillRepeatedly(
+ testing::Return(true));
+ EXPECT_TRUE(paced_sender_->SendRtcpPacket(packet));
+}
+
+TEST_F(PacedSenderTest, BasicPace) {
+ std::vector<uint8> packet(kSize1, kValue);
+ int num_of_packets = 9;
+
+ EXPECT_CALL(mock_transport_,
+ SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
+ for (int i = 0; i < num_of_packets; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
+ }
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+
+ // Check that we get the next burst.
+ EXPECT_CALL(mock_transport_,
+ SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
+
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // If we call process too early make sure we don't send any packets.
+ timeout = base::TimeDelta::FromMilliseconds(5);
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we get the next burst.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
+ testing::Return(true));
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we don't get any more packets.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
+ timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+}
+
+TEST_F(PacedSenderTest, PaceWithNack) {
+  // Tests what happens when we get multiple NACK requests for a fully lost
+  // frame just as we sent the first packets of a frame.
+ std::vector<uint8> firts_packet(kSize1, kValue);
+ std::vector<uint8> second_packet(kSize2, kValue);
+ std::vector<uint8> nack_packet(kNackSize, kValue);
+ int num_of_packets_in_frame = 9;
+ int num_of_packets_in_nack = 9;
+
+ // Check that the first burst of the frame go out on the wire.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
+ testing::Return(true));
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
+ num_of_packets_in_frame));
+ }
+ // Add first NACK request.
+ for (int i = 0; i < num_of_packets_in_nack; ++i) {
+ EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
+ num_of_packets_in_nack));
+ }
+ // Check that we get the first NACK burst.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(5).
+ WillRepeatedly(testing::Return(true));
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second NACK request.
+ for (int i = 0; i < num_of_packets_in_nack; ++i) {
+ EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
+ num_of_packets_in_nack));
+ }
+
+ // Check that we get the next NACK burst.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(7)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // End of NACK plus a packet from the oldest frame.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(6)
+ .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second frame.
+ // Make sure we don't delay the second frame due to the previous packets.
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
+ num_of_packets_in_frame));
+ }
+
+ // Last packets of frame 1 and the first packets of frame 2.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(5).WillRepeatedly(
+ testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(2).WillRepeatedly(
+ testing::Return(true));
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Last packets of frame 2.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(7).WillRepeatedly(
+ testing::Return(true));
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // No more packets.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(0);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+}
+
+TEST_F(PacedSenderTest, PaceWith60fps) {
+  // Tests that pacing keeps up with frames arriving at 60fps (one frame
+  // roughly every 16 ms) without the queue delaying newly inserted frames.
+ std::vector<uint8> firts_packet(kSize1, kValue);
+ std::vector<uint8> second_packet(kSize2, kValue);
+ std::vector<uint8> third_packet(kSize3, kValue);
+ std::vector<uint8> fourth_packet(kSize4, kValue);
+ base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
+ int num_of_packets_in_frame = 9;
+
+ // Check that the first burst of the frame go out on the wire.
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
+ testing::Return(true));
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
+ num_of_packets_in_frame));
+ }
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).
+ WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
+
+ // Add second frame, after 16 ms.
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
+ num_of_packets_in_frame));
+ }
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(4));
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3)
+ .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(1)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
+
+ // Add third frame, after 33 ms.
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(third_packet,
+ num_of_packets_in_frame));
+ }
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
+ .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(1)
+ .WillRepeatedly(testing::Return(true));
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
+ task_runner_->RunTasks();
+
+ // Add fourth frame, after 50 ms.
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ EXPECT_TRUE(paced_sender_->SendPacket(fourth_packet,
+ num_of_packets_in_frame));
+ }
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(6)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(2)
+ .WillRepeatedly(testing::Return(true));
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(4)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(5)
+ .WillRepeatedly(testing::Return(true));
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(0);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
new file mode 100644
index 00000000000..09e2cb4faf5
--- /dev/null
+++ b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
@@ -0,0 +1,38 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_MOCK_RTCP_RECEIVER_FEEDBACK_H_
+#define MEDIA_CAST_RTCP_MOCK_RTCP_RECEIVER_FEEDBACK_H_
+
+#include <vector>
+
+#include "media/cast/rtcp/rtcp_receiver.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
+ public:
+ MOCK_METHOD1(OnReceivedSenderReport,
+ void(const RtcpSenderInfo& remote_sender_info));
+
+ MOCK_METHOD1(OnReceiverReferenceTimeReport,
+ void(const RtcpReceiverReferenceTimeReport& remote_time_report));
+
+ MOCK_METHOD0(OnReceivedSendReportRequest, void());
+};
+
+class MockRtcpRttFeedback : public RtcpRttFeedback {
+ public:
+ MOCK_METHOD3(OnReceivedDelaySinceLastReport,
+ void(uint32 media_ssrc,
+ uint32 last_report,
+ uint32 delay_since_last_report));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_MOCK_RTCP_RECEIVER_FEEDBACK_H_
diff --git a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h
new file mode 100644
index 00000000000..3947625489f
--- /dev/null
+++ b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_MOCK_RTCP_SENDER_FEEDBACK_H_
+#define MEDIA_CAST_RTCP_MOCK_RTCP_SENDER_FEEDBACK_H_
+
+#include <vector>
+
+#include "media/cast/rtcp/rtcp_receiver.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtcpSenderFeedback : public RtcpSenderFeedback {
+ public:
+ MOCK_METHOD1(OnReceivedReportBlock,
+ void(const RtcpReportBlock& report_block));
+
+ MOCK_METHOD0(OnReceivedIntraFrameRequest, void());
+
+ MOCK_METHOD2(OnReceivedRpsi, void(uint8 payload_type, uint64 picture_id));
+
+ MOCK_METHOD1(OnReceivedRemb, void(uint32 bitrate));
+
+ MOCK_METHOD1(OnReceivedNackRequest,
+ void(const std::list<uint16>& nack_sequence_numbers));
+
+ MOCK_METHOD1(OnReceivedCastFeedback,
+ void(const RtcpCastMessage& cast_feedback));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_MOCK_RTCP_SENDER_FEEDBACK_H_
diff --git a/chromium/media/cast/rtcp/rtcp.cc b/chromium/media/cast/rtcp/rtcp.cc
new file mode 100644
index 00000000000..c3e2c8e4d88
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp.cc
@@ -0,0 +1,416 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/rtcp.h"
+
+#include "base/debug/trace_event.h"
+#include "base/rand_util.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/rtcp/rtcp_receiver.h"
+#include "media/cast/rtcp/rtcp_sender.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+static const int kMaxRttMs = 1000000; // 1000 seconds.
+
+// Time limit for received RTCP messages when we stop using it for lip-sync.
+static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
+
+class LocalRtcpRttFeedback : public RtcpRttFeedback {
+ public:
+ explicit LocalRtcpRttFeedback(Rtcp* rtcp)
+ : rtcp_(rtcp) {
+ }
+
+ virtual void OnReceivedDelaySinceLastReport(
+ uint32 receivers_ssrc,
+ uint32 last_report,
+ uint32 delay_since_last_report) OVERRIDE {
+ rtcp_->OnReceivedDelaySinceLastReport(receivers_ssrc,
+ last_report,
+ delay_since_last_report);
+ }
+
+ private:
+ Rtcp* rtcp_;
+};
+
+RtcpCastMessage::RtcpCastMessage(uint32 media_ssrc)
+ : media_ssrc_(media_ssrc) {}
+
+RtcpCastMessage::~RtcpCastMessage() {}
+
+RtcpNackMessage::RtcpNackMessage() {}
+RtcpNackMessage::~RtcpNackMessage() {}
+
+RtcpRembMessage::RtcpRembMessage() {}
+RtcpRembMessage::~RtcpRembMessage() {}
+
+
+class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
+ public:
+ explicit LocalRtcpReceiverFeedback(Rtcp* rtcp)
+ : rtcp_(rtcp) {
+ }
+
+ virtual void OnReceivedSenderReport(
+ const RtcpSenderInfo& remote_sender_info) OVERRIDE {
+ rtcp_->OnReceivedNtp(remote_sender_info.ntp_seconds,
+ remote_sender_info.ntp_fraction);
+ if (remote_sender_info.send_packet_count != 0) {
+ rtcp_->OnReceivedLipSyncInfo(remote_sender_info.rtp_timestamp,
+ remote_sender_info.ntp_seconds,
+ remote_sender_info.ntp_fraction);
+ }
+ }
+
+ virtual void OnReceiverReferenceTimeReport(
+ const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE {
+ rtcp_->OnReceivedNtp(remote_time_report.ntp_seconds,
+ remote_time_report.ntp_fraction);
+ }
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {
+ rtcp_->OnReceivedSendReportRequest();
+ }
+
+ private:
+ Rtcp* rtcp_;
+};
+
+Rtcp::Rtcp(RtcpSenderFeedback* sender_feedback,
+ PacedPacketSender* paced_packet_sender,
+ RtpSenderStatistics* rtp_sender_statistics,
+ RtpReceiverStatistics* rtp_receiver_statistics,
+ RtcpMode rtcp_mode,
+ const base::TimeDelta& rtcp_interval,
+ bool sending_media,
+ uint32 local_ssrc,
+ const std::string& c_name)
+ : rtcp_interval_(rtcp_interval),
+ rtcp_mode_(rtcp_mode),
+ sending_media_(sending_media),
+ local_ssrc_(local_ssrc),
+ rtp_sender_statistics_(rtp_sender_statistics),
+ rtp_receiver_statistics_(rtp_receiver_statistics),
+ receiver_feedback_(new LocalRtcpReceiverFeedback(this)),
+ rtt_feedback_(new LocalRtcpRttFeedback(this)),
+ rtcp_sender_(new RtcpSender(paced_packet_sender, local_ssrc, c_name)),
+ last_report_sent_(0),
+ last_report_received_(0),
+ last_received_rtp_timestamp_(0),
+ last_received_ntp_seconds_(0),
+ last_received_ntp_fraction_(0),
+ min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
+ number_of_rtt_in_avg_(0),
+ clock_(&default_tick_clock_) {
+ rtcp_receiver_.reset(new RtcpReceiver(sender_feedback,
+ receiver_feedback_.get(),
+ rtt_feedback_.get(),
+ local_ssrc));
+}
+
+Rtcp::~Rtcp() {}
+
+// static
+bool Rtcp::IsRtcpPacket(const uint8* packet, int length) {
+ DCHECK_GE(length, 8) << "Invalid RTCP packet";
+ if (length < 8) return false;
+
+ uint8 packet_type = packet[1];
+ if (packet_type >= kPacketTypeLow && packet_type <= kPacketTypeHigh) {
+ return true;
+ }
+ return false;
+}
+
+// static
+uint32 Rtcp::GetSsrcOfSender(const uint8* rtcp_buffer, int length) {
+ uint32 ssrc_of_sender;
+ net::BigEndianReader big_endian_reader(rtcp_buffer, length);
+ big_endian_reader.Skip(4); // Skip header
+ big_endian_reader.ReadU32(&ssrc_of_sender);
+ return ssrc_of_sender;
+}
+
+base::TimeTicks Rtcp::TimeToSendNextRtcpReport() {
+ if (next_time_to_send_rtcp_.is_null()) {
+ UpdateNextTimeToSendRtcp();
+ }
+ return next_time_to_send_rtcp_;
+}
+
+void Rtcp::SetRemoteSSRC(uint32 ssrc) {
+ rtcp_receiver_->SetRemoteSSRC(ssrc);
+}
+
+void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, int length) {
+ RtcpParser rtcp_parser(rtcp_buffer, length);
+ if (!rtcp_parser.IsValid()) {
+ // Silently ignore packet.
+ DLOG(ERROR) << "Received invalid RTCP packet";
+ return;
+ }
+ rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
+}
+
+void Rtcp::SendRtcpCast(const RtcpCastMessage& cast_message) {
+ uint32 packet_type_flags = 0;
+ base::TimeTicks now = clock_->NowTicks();
+
+ if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
+ if (sending_media_) {
+ packet_type_flags = RtcpSender::kRtcpSr;
+ } else {
+ packet_type_flags = RtcpSender::kRtcpRr;
+ }
+ }
+ packet_type_flags |= RtcpSender::kRtcpCast;
+
+ SendRtcp(now, packet_type_flags, 0, &cast_message);
+}
+
+void Rtcp::SendRtcpPli(uint32 pli_remote_ssrc) {
+ uint32 packet_type_flags = 0;
+ base::TimeTicks now = clock_->NowTicks();
+
+ if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
+ if (sending_media_) {
+ packet_type_flags = RtcpSender::kRtcpSr;
+ } else {
+ packet_type_flags = RtcpSender::kRtcpRr;
+ }
+ }
+ packet_type_flags |= RtcpSender::kRtcpPli;
+ SendRtcp(now, packet_type_flags, pli_remote_ssrc, NULL);
+}
+
+void Rtcp::SendRtcpReport(uint32 media_ssrc) {
+ uint32 packet_type_flags;
+ base::TimeTicks now = clock_->NowTicks();
+ if (sending_media_) {
+ packet_type_flags = RtcpSender::kRtcpSr;
+ } else {
+ packet_type_flags = RtcpSender::kRtcpRr;
+ }
+ SendRtcp(now, packet_type_flags, media_ssrc, NULL);
+}
+
+void Rtcp::SendRtcp(const base::TimeTicks& now,
+ uint32 packet_type_flags,
+ uint32 media_ssrc,
+ const RtcpCastMessage* cast_message) {
+ if (packet_type_flags & RtcpSender::kRtcpSr ||
+ packet_type_flags & RtcpSender::kRtcpRr) {
+ UpdateNextTimeToSendRtcp();
+ }
+ if (packet_type_flags & RtcpSender::kRtcpSr) {
+ RtcpSenderInfo sender_info;
+
+ if (rtp_sender_statistics_) {
+ rtp_sender_statistics_->GetStatistics(now, &sender_info);
+ } else {
+ memset(&sender_info, 0, sizeof(sender_info));
+ }
+ time_last_report_sent_ = now;
+ last_report_sent_ = (sender_info.ntp_seconds << 16) +
+ (sender_info.ntp_fraction >> 16);
+
+ RtcpDlrrReportBlock dlrr;
+ if (!time_last_report_received_.is_null()) {
+ packet_type_flags |= RtcpSender::kRtcpDlrr;
+ dlrr.last_rr = last_report_received_;
+ uint32 delay_seconds = 0;
+ uint32 delay_fraction = 0;
+ base::TimeDelta delta = now - time_last_report_received_;
+ ConvertTimeToFractions(delta.InMicroseconds(),
+ &delay_seconds,
+ &delay_fraction);
+
+ dlrr.delay_since_last_rr =
+ ConvertToNtpDiff(delay_seconds, delay_fraction);
+ }
+ rtcp_sender_->SendRtcp(packet_type_flags,
+ &sender_info,
+ NULL,
+ media_ssrc,
+ &dlrr,
+ NULL,
+ NULL);
+ } else {
+ RtcpReportBlock report_block;
+    report_block.remote_ssrc = 0;  // Does not need to be set on the send side.
+ report_block.media_ssrc = media_ssrc; // SSRC of the RTP packet sender.
+ if (rtp_receiver_statistics_) {
+ rtp_receiver_statistics_->GetStatistics(
+ &report_block.fraction_lost,
+ &report_block.cumulative_lost,
+ &report_block.extended_high_sequence_number,
+ &report_block.jitter);
+ }
+
+ report_block.last_sr = last_report_received_;
+ if (!time_last_report_received_.is_null()) {
+ uint32 delay_seconds = 0;
+ uint32 delay_fraction = 0;
+ base::TimeDelta delta = now - time_last_report_received_;
+ ConvertTimeToFractions(delta.InMicroseconds(),
+ &delay_seconds,
+ &delay_fraction);
+ report_block.delay_since_last_sr =
+ ConvertToNtpDiff(delay_seconds, delay_fraction);
+ } else {
+ report_block.delay_since_last_sr = 0;
+ }
+
+ packet_type_flags |= RtcpSender::kRtcpRrtr;
+ RtcpReceiverReferenceTimeReport rrtr;
+ ConvertTimeToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
+
+ time_last_report_sent_ = now;
+ last_report_sent_ = ConvertToNtpDiff(rrtr.ntp_seconds, rrtr.ntp_fraction);
+
+ rtcp_sender_->SendRtcp(packet_type_flags,
+ NULL,
+ &report_block,
+ media_ssrc,
+ NULL,
+ &rrtr,
+ cast_message);
+ }
+}
+
+void Rtcp::OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction) {
+ last_report_received_ = (ntp_seconds << 16) + (ntp_fraction >> 16);
+
+ base::TimeTicks now = clock_->NowTicks();
+ time_last_report_received_ = now;
+}
+
+void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction) {
+ last_received_rtp_timestamp_ = rtp_timestamp;
+ last_received_ntp_seconds_ = ntp_seconds;
+ last_received_ntp_fraction_ = ntp_fraction;
+}
+
+void Rtcp::OnReceivedSendReportRequest() {
+ base::TimeTicks now = clock_->NowTicks();
+
+ // Trigger a new RTCP report at next timer.
+ next_time_to_send_rtcp_ = now;
+}
+
+bool Rtcp::RtpTimestampInSenderTime(int frequency, uint32 rtp_timestamp,
+ base::TimeTicks* rtp_timestamp_in_ticks) const {
+ if (last_received_ntp_seconds_ == 0) return false;
+
+ int wrap = CheckForWrapAround(rtp_timestamp, last_received_rtp_timestamp_);
+ int64 rtp_timestamp_int64 = rtp_timestamp;
+ int64 last_received_rtp_timestamp_int64 = last_received_rtp_timestamp_;
+
+ if (wrap == 1) {
+ rtp_timestamp_int64 += (1LL << 32);
+ } else if (wrap == -1) {
+ last_received_rtp_timestamp_int64 += (1LL << 32);
+ }
+ // Time since the last RTCP message.
+ // Note that this can be negative since we can compare a rtp timestamp from
+ // a frame older than the last received RTCP message.
+ int64 rtp_timestamp_diff =
+ rtp_timestamp_int64 - last_received_rtp_timestamp_int64;
+
+ int frequency_khz = frequency / 1000;
+ int64 rtp_time_diff_ms = rtp_timestamp_diff / frequency_khz;
+
+ // Sanity check.
+ if (abs(rtp_time_diff_ms) > kMaxDiffSinceReceivedRtcpMs) return false;
+
+ *rtp_timestamp_in_ticks =
+ ConvertNtpToTime(last_received_ntp_seconds_, last_received_ntp_fraction_) +
+ base::TimeDelta::FromMilliseconds(rtp_time_diff_ms);
+ return true;
+}
+
+void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
+ uint32 last_report,
+ uint32 delay_since_last_report) {
+ if (last_report_sent_ != last_report) return; // Feedback on another report.
+ if (time_last_report_sent_.is_null()) return;
+
+ base::TimeDelta sender_delay = clock_->NowTicks() - time_last_report_sent_;
+ UpdateRtt(sender_delay, ConvertFromNtpDiff(delay_since_last_report));
+}
+
+void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
+ const base::TimeDelta& receiver_delay) {
+ base::TimeDelta rtt = sender_delay - receiver_delay;
+ rtt = std::max(rtt, base::TimeDelta::FromMilliseconds(1));
+ rtt_ = rtt;
+ min_rtt_ = std::min(min_rtt_, rtt);
+ max_rtt_ = std::max(max_rtt_, rtt);
+
+ if (number_of_rtt_in_avg_ != 0) {
+ float ac = static_cast<float>(number_of_rtt_in_avg_);
+ avg_rtt_ms_= ((ac / (ac + 1.0)) * avg_rtt_ms_) +
+ ((1.0 / (ac + 1.0)) * rtt.InMilliseconds());
+ } else {
+ avg_rtt_ms_ = rtt.InMilliseconds();
+ }
+ number_of_rtt_in_avg_++;
+ TRACE_COUNTER_ID1("cast_rtcp", "RTT", local_ssrc_, rtt.InMilliseconds());
+}
+
+bool Rtcp::Rtt(base::TimeDelta* rtt,
+ base::TimeDelta* avg_rtt,
+ base::TimeDelta* min_rtt,
+ base::TimeDelta* max_rtt) const {
+ DCHECK(rtt) << "Invalid argument";
+ DCHECK(avg_rtt) << "Invalid argument";
+ DCHECK(min_rtt) << "Invalid argument";
+ DCHECK(max_rtt) << "Invalid argument";
+
+ if (number_of_rtt_in_avg_ == 0) return false;
+
+ *rtt = rtt_;
+ *avg_rtt = base::TimeDelta::FromMilliseconds(avg_rtt_ms_);
+ *min_rtt = min_rtt_;
+ *max_rtt = max_rtt_;
+ return true;
+}
+
+int Rtcp::CheckForWrapAround(uint32 new_timestamp,
+ uint32 old_timestamp) const {
+ if (new_timestamp < old_timestamp) {
+    // This difference should be less than -2^31 if we have had a wrap around
+    // (e.g. |new_timestamp| = 1, |old_timestamp| = 2^32 - 1). Since it is
+    // cast to an int32, it should be positive.
+ if (static_cast<int32>(new_timestamp - old_timestamp) > 0) {
+ return 1; // Forward wrap around.
+ }
+ } else if (static_cast<int32>(old_timestamp - new_timestamp) > 0) {
+ // This difference should be less than -2^31 if we have had a backward wrap
+    // around. Since it is cast to an int32, it should be positive.
+ return -1;
+ }
+ return 0;
+}
+
+void Rtcp::UpdateNextTimeToSendRtcp() {
+ int random = base::RandInt(0, 999);
+ base::TimeDelta time_to_next = (rtcp_interval_ / 2) +
+ (rtcp_interval_ * random / 1000);
+
+ base::TimeTicks now = clock_->NowTicks();
+ next_time_to_send_rtcp_ = now + time_to_next;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp.gyp b/chromium/media/cast/rtcp/rtcp.gyp
new file mode 100644
index 00000000000..14119988c8e
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp.gyp
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtcp',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'rtcp_defines.h',
+ 'rtcp.h',
+ 'rtcp.cc',
+ 'rtcp_receiver.cc',
+ 'rtcp_receiver.h',
+ 'rtcp_sender.cc',
+ 'rtcp_sender.h',
+ 'rtcp_utility.cc',
+ 'rtcp_utility.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ },
+ {
+ 'target_name': 'cast_rtcp_test',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'test_rtcp_packet_builder.cc',
+ 'test_rtcp_packet_builder.h',
+ ], # source
+ 'dependencies': [
+ 'cast_rtcp',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ },
+ ],
+}
+
diff --git a/chromium/media/cast/rtcp/rtcp.h b/chromium/media/cast/rtcp/rtcp.h
new file mode 100644
index 00000000000..31962a526c6
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp.h
@@ -0,0 +1,173 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RTCP_H_
+#define MEDIA_CAST_RTCP_RTCP_H_
+
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+
+namespace media {
+namespace cast {
+
+class LocalRtcpReceiverFeedback;
+class LocalRtcpRttFeedback;
+class PacedPacketSender;
+class RtcpReceiver;
+class RtcpSender;
+
+class RtcpSenderFeedback {
+ public:
+ virtual void OnReceivedReportBlock(const RtcpReportBlock& report_block) = 0;
+
+ virtual void OnReceivedIntraFrameRequest() = 0;
+
+ virtual void OnReceivedRpsi(uint8 payload_type, uint64 picture_id) = 0;
+
+ virtual void OnReceivedRemb(uint32 bitrate) = 0;
+
+ virtual void OnReceivedNackRequest(
+ const std::list<uint16>& nack_sequence_numbers) = 0;
+
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
+
+ virtual ~RtcpSenderFeedback() {}
+};
+
+class RtpSenderStatistics {
+ public:
+ virtual void GetStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) = 0;
+
+ virtual ~RtpSenderStatistics() {}
+};
+
+class RtpReceiverStatistics {
+ public:
+ virtual void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter) = 0;
+
+ virtual ~RtpReceiverStatistics() {}
+};
+
+class Rtcp {
+ public:
+ Rtcp(RtcpSenderFeedback* sender_feedback,
+ PacedPacketSender* paced_packet_sender,
+ RtpSenderStatistics* rtp_sender_statistics,
+ RtpReceiverStatistics* rtp_receiver_statistics,
+ RtcpMode rtcp_mode,
+ const base::TimeDelta& rtcp_interval,
+ bool sending_media,
+ uint32 local_ssrc,
+ const std::string& c_name);
+
+ virtual ~Rtcp();
+
+ static bool IsRtcpPacket(const uint8* rtcp_buffer, int length);
+
+ static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, int length);
+
+ base::TimeTicks TimeToSendNextRtcpReport();
+ void SendRtcpReport(uint32 media_ssrc);
+ void SendRtcpPli(uint32 media_ssrc);
+ void SendRtcpCast(const RtcpCastMessage& cast_message);
+ void SetRemoteSSRC(uint32 ssrc);
+
+ void IncomingRtcpPacket(const uint8* rtcp_buffer, int length);
+ bool Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
+ base::TimeDelta* min_rtt, base::TimeDelta* max_rtt) const;
+ bool RtpTimestampInSenderTime(int frequency,
+ uint32 rtp_timestamp,
+ base::TimeTicks* rtp_timestamp_in_ticks) const;
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ protected:
+ int CheckForWrapAround(uint32 new_timestamp,
+ uint32 old_timestamp) const;
+
+ void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction);
+
+ private:
+ friend class LocalRtcpRttFeedback;
+ friend class LocalRtcpReceiverFeedback;
+
+ void SendRtcp(const base::TimeTicks& now,
+ uint32 packet_type_flags,
+ uint32 media_ssrc,
+ const RtcpCastMessage* cast_message);
+
+ void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
+
+ void OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
+ uint32 last_report,
+ uint32 delay_since_last_report);
+
+ void OnReceivedSendReportRequest();
+
+ void UpdateRtt(const base::TimeDelta& sender_delay,
+ const base::TimeDelta& receiver_delay);
+
+ void UpdateNextTimeToSendRtcp();
+
+ const base::TimeDelta rtcp_interval_;
+ const RtcpMode rtcp_mode_;
+ const bool sending_media_;
+ const uint32 local_ssrc_;
+
+ // Not owned by this class.
+ RtpSenderStatistics* const rtp_sender_statistics_;
+ RtpReceiverStatistics* const rtp_receiver_statistics_;
+
+ scoped_ptr<LocalRtcpRttFeedback> rtt_feedback_;
+ scoped_ptr<LocalRtcpReceiverFeedback> receiver_feedback_;
+ scoped_ptr<RtcpSender> rtcp_sender_;
+ scoped_ptr<RtcpReceiver> rtcp_receiver_;
+
+ base::TimeTicks next_time_to_send_rtcp_;
+
+ base::TimeTicks time_last_report_sent_;
+ uint32 last_report_sent_;
+
+ base::TimeTicks time_last_report_received_;
+ uint32 last_report_received_;
+
+ uint32 last_received_rtp_timestamp_;
+ uint32 last_received_ntp_seconds_;
+ uint32 last_received_ntp_fraction_;
+
+ base::TimeDelta rtt_;
+ base::TimeDelta min_rtt_;
+ base::TimeDelta max_rtt_;
+ int number_of_rtt_in_avg_;
+ float avg_rtt_ms_;
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(Rtcp);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_RTCP_H_
diff --git a/chromium/media/cast/rtcp/rtcp_defines.h b/chromium/media/cast/rtcp/rtcp_defines.h
new file mode 100644
index 00000000000..f0635f8ca8f
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_defines.h
@@ -0,0 +1,113 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RTCP_DEFINES_H_
+#define MEDIA_CAST_RTCP_RTCP_DEFINES_H_
+
+#include <list>
+#include <map>
+#include <set>
+
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtcpCastMessage {
+ public:
+ explicit RtcpCastMessage(uint32 media_ssrc);
+ ~RtcpCastMessage();
+
+ uint32 media_ssrc_;
+ uint8 ack_frame_id_;
+ MissingFramesAndPacketsMap missing_frames_and_packets_;
+};
+
+struct RtcpSenderInfo {
+ // First three members are used for lipsync.
+ // First two members are used for rtt.
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ uint32 rtp_timestamp;
+ uint32 send_packet_count;
+ uint32 send_octet_count;
+};
+
+struct RtcpReportBlock {
+ uint32 remote_ssrc; // SSRC of sender of this report.
+ uint32 media_ssrc; // SSRC of the RTP packet sender.
+ uint8 fraction_lost;
+ uint32 cumulative_lost; // 24 bits valid.
+ uint32 extended_high_sequence_number;
+ uint32 jitter;
+ uint32 last_sr;
+ uint32 delay_since_last_sr;
+};
+
+struct RtcpRpsiMessage {
+ uint32 remote_ssrc;
+ uint8 payload_type;
+ uint64 picture_id;
+};
+
+class RtcpNackMessage {
+ public:
+ RtcpNackMessage();
+ ~RtcpNackMessage();
+
+ uint32 remote_ssrc;
+ std::list<uint16> nack_list;
+};
+
+class RtcpRembMessage {
+ public:
+ RtcpRembMessage();
+ ~RtcpRembMessage();
+
+ uint32 remb_bitrate;
+ std::list<uint32> remb_ssrcs;
+};
+
+struct RtcpReceiverReferenceTimeReport {
+ uint32 remote_ssrc;
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+};
+
+struct RtcpDlrrReportBlock {
+ uint32 last_rr;
+ uint32 delay_since_last_rr;
+};
+
+inline bool operator==(RtcpReportBlock lhs, RtcpReportBlock rhs) {
+ return lhs.remote_ssrc == rhs.remote_ssrc &&
+ lhs.media_ssrc == rhs.media_ssrc &&
+ lhs.fraction_lost == rhs.fraction_lost &&
+ lhs.cumulative_lost == rhs.cumulative_lost &&
+ lhs.extended_high_sequence_number == rhs.extended_high_sequence_number &&
+ lhs.jitter == rhs.jitter &&
+ lhs.last_sr == rhs.last_sr &&
+ lhs.delay_since_last_sr == rhs.delay_since_last_sr;
+}
+
+inline bool operator==(RtcpSenderInfo lhs, RtcpSenderInfo rhs) {
+ return lhs.ntp_seconds == rhs.ntp_seconds &&
+ lhs.ntp_fraction == rhs.ntp_fraction &&
+ lhs.rtp_timestamp == rhs.rtp_timestamp &&
+ lhs.send_packet_count == rhs.send_packet_count &&
+ lhs.send_octet_count == rhs.send_octet_count;
+}
+
+inline bool operator==(RtcpReceiverReferenceTimeReport lhs,
+ RtcpReceiverReferenceTimeReport rhs) {
+ return lhs.remote_ssrc == rhs.remote_ssrc &&
+ lhs.ntp_seconds == rhs.ntp_seconds &&
+ lhs.ntp_fraction == rhs.ntp_fraction;
+}
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_RTCP_DEFINES_H_
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.cc b/chromium/media/cast/rtcp/rtcp_receiver.cc
new file mode 100644
index 00000000000..c0e9b9b501b
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_receiver.cc
@@ -0,0 +1,465 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/rtcp_receiver.h"
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+
+namespace media {
+namespace cast {
+
+RtcpReceiver::RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+ RtcpReceiverFeedback* receiver_feedback,
+ RtcpRttFeedback* rtt_feedback,
+ uint32 local_ssrc)
+ : ssrc_(local_ssrc),
+ remote_ssrc_(0),
+ sender_feedback_(sender_feedback),
+ receiver_feedback_(receiver_feedback),
+ rtt_feedback_(rtt_feedback) {
+}
+
+RtcpReceiver::~RtcpReceiver() {}
+
+void RtcpReceiver::SetRemoteSSRC(uint32 ssrc) {
+ remote_ssrc_ = ssrc;
+}
+
+void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
+ RtcpFieldTypes field_type = rtcp_parser->Begin();
+ while (field_type != kRtcpNotValidCode) {
+    // Each "case" is responsible for iterating the parser to the next
+    // top-level packet.
+ switch (field_type) {
+ case kRtcpSrCode:
+ HandleSenderReport(rtcp_parser);
+ break;
+ case kRtcpRrCode:
+ HandleReceiverReport(rtcp_parser);
+ break;
+ case kRtcpSdesCode:
+ HandleSDES(rtcp_parser);
+ break;
+ case kRtcpByeCode:
+ HandleBYE(rtcp_parser);
+ break;
+ case kRtcpXrCode:
+ HandleXr(rtcp_parser);
+ break;
+ case kRtcpGenericRtpFeedbackNackCode:
+ HandleNACK(rtcp_parser);
+ break;
+ case kRtcpGenericRtpFeedbackSrReqCode:
+ HandleSendReportRequest(rtcp_parser);
+ break;
+ case kRtcpPayloadSpecificPliCode:
+ HandlePLI(rtcp_parser);
+ break;
+ case kRtcpPayloadSpecificRpsiCode:
+ HandleRpsi(rtcp_parser);
+ break;
+ case kRtcpPayloadSpecificFirCode:
+ HandleFIR(rtcp_parser);
+ break;
+ case kRtcpPayloadSpecificAppCode:
+ HandlePayloadSpecificApp(rtcp_parser);
+ break;
+ case kRtcpPayloadSpecificRembCode:
+ case kRtcpPayloadSpecificRembItemCode:
+ // Ignore this until we want to support interop with webrtc.
+ rtcp_parser->Iterate();
+ break;
+ case kRtcpPayloadSpecificCastCode:
+ case kRtcpPayloadSpecificCastNackItemCode:
+ rtcp_parser->Iterate();
+ break;
+ case kRtcpNotValidCode:
+ case kRtcpReportBlockItemCode:
+ case kRtcpSdesChunkCode:
+ case kRtcpGenericRtpFeedbackNackItemCode:
+ case kRtcpPayloadSpecificFirItemCode:
+ case kRtcpXrRrtrCode:
+ case kRtcpXrDlrrCode:
+ case kRtcpXrUnknownItemCode:
+ rtcp_parser->Iterate();
+ DCHECK(false) << "Invalid state";
+ break;
+ }
+ field_type = rtcp_parser->FieldType();
+ }
+}
+
+void RtcpReceiver::HandleSenderReport(RtcpParser* rtcp_parser) {
+ RtcpFieldTypes rtcp_field_type = rtcp_parser->FieldType();
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+
+ DCHECK(rtcp_field_type == kRtcpSrCode) << "Invalid state";
+
+ // Synchronization source identifier for the originator of this SR packet.
+ uint32 remote_ssrc = rtcp_field.sender_report.sender_ssrc;
+
+ TRACE_EVENT_INSTANT1("cast_rtcp", "SR", TRACE_EVENT_SCOPE_THREAD,
+ "remote_ssrc", remote_ssrc);
+
+ if (remote_ssrc_ == remote_ssrc) {
+ RtcpSenderInfo remote_sender_info;
+ remote_sender_info.ntp_seconds =
+ rtcp_field.sender_report.ntp_most_significant;
+ remote_sender_info.ntp_fraction =
+ rtcp_field.sender_report.ntp_least_significant;
+ remote_sender_info.rtp_timestamp =
+ rtcp_field.sender_report.rtp_timestamp;
+ remote_sender_info.send_packet_count =
+ rtcp_field.sender_report.sender_packet_count;
+ remote_sender_info.send_octet_count =
+ rtcp_field.sender_report.sender_octet_count;
+ if (receiver_feedback_) {
+ receiver_feedback_->OnReceivedSenderReport(remote_sender_info);
+ }
+ }
+ rtcp_field_type = rtcp_parser->Iterate();
+ while (rtcp_field_type == kRtcpReportBlockItemCode) {
+ HandleReportBlock(&rtcp_field, remote_ssrc);
+ rtcp_field_type = rtcp_parser->Iterate();
+ }
+}
+
+void RtcpReceiver::HandleReceiverReport(RtcpParser* rtcp_parser) {
+ RtcpFieldTypes rtcp_field_type = rtcp_parser->FieldType();
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+
+ DCHECK(rtcp_field_type == kRtcpRrCode) << "Invalid state";
+
+ uint32 remote_ssrc = rtcp_field.receiver_report.sender_ssrc;
+
+ TRACE_EVENT_INSTANT1("cast_rtcp", "RR", TRACE_EVENT_SCOPE_THREAD,
+ "remote_ssrc", remote_ssrc);
+
+ rtcp_field_type = rtcp_parser->Iterate();
+ while (rtcp_field_type == kRtcpReportBlockItemCode) {
+ HandleReportBlock(&rtcp_field, remote_ssrc);
+ rtcp_field_type = rtcp_parser->Iterate();
+ }
+}
+
+void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
+ uint32 remote_ssrc) {
+ // This will be called once per report block in the Rtcp packet.
+ // We filter out all report blocks that are not for us.
+ // Each packet has max 31 RR blocks.
+ //
+ // We can calculate RTT if we send a send report and get a report block back.
+
+  // |rtcp_field->report_block_item.ssrc| is the SSRC identifier of the source
+  // to which the information in this reception report block pertains.
+
+ const RtcpFieldReportBlockItem& rb = rtcp_field->report_block_item;
+
+ // Filter out all report blocks that are not for us.
+ if (rb.ssrc != ssrc_) {
+    // This block is not for us; ignore it.
+ return;
+ }
+ TRACE_EVENT_INSTANT2("cast_rtcp", "RB", TRACE_EVENT_SCOPE_THREAD,
+ "remote_ssrc", remote_ssrc,
+ "ssrc", ssrc_);
+
+ TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::FractionLost",
+ rb.ssrc, rb.fraction_lost);
+ TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::CumulativeNumberOfPacketsLost",
+ rb.ssrc, rb.cumulative_number_of_packets_lost);
+ TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::Jitter",
+ rb.ssrc, rb.jitter);
+
+ RtcpReportBlock report_block;
+ report_block.remote_ssrc = remote_ssrc;
+ report_block.media_ssrc = rb.ssrc;
+ report_block.fraction_lost = rb.fraction_lost;
+ report_block.cumulative_lost = rb.cumulative_number_of_packets_lost;
+ report_block.extended_high_sequence_number =
+ rb.extended_highest_sequence_number;
+ report_block.jitter = rb.jitter;
+ report_block.last_sr = rb.last_sender_report;
+ report_block.delay_since_last_sr = rb.delay_last_sender_report;
+
+ if (sender_feedback_) {
+ sender_feedback_->OnReceivedReportBlock(report_block);
+ }
+ if (rtt_feedback_) {
+ rtt_feedback_->OnReceivedDelaySinceLastReport(rb.ssrc,
+ rb.last_sender_report,
+ rb.delay_last_sender_report);
+ }
+}
+
+void RtcpReceiver::HandleSDES(RtcpParser* rtcp_parser) {
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpSdesChunkCode) {
+ HandleSDESChunk(rtcp_parser);
+ field_type = rtcp_parser->Iterate();
+ }
+}
+
+void RtcpReceiver::HandleSDESChunk(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ TRACE_EVENT_INSTANT1("cast_rtcp", "SDES", TRACE_EVENT_SCOPE_THREAD,
+ "cname", TRACE_STR_COPY(rtcp_field.c_name.name));
+}
+
+void RtcpReceiver::HandleXr(RtcpParser* rtcp_parser) {
+ RtcpFieldTypes rtcp_field_type = rtcp_parser->FieldType();
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+
+ DCHECK(rtcp_field_type == kRtcpXrCode) << "Invalid state";
+
+ uint32 remote_ssrc = rtcp_field.extended_report.sender_ssrc;
+ rtcp_field_type = rtcp_parser->Iterate();
+
+ while (rtcp_field_type == kRtcpXrDlrrCode ||
+ rtcp_field_type == kRtcpXrRrtrCode ||
+ rtcp_field_type == kRtcpXrUnknownItemCode) {
+ if (rtcp_field_type == kRtcpXrRrtrCode) {
+ HandleRrtr(rtcp_parser, remote_ssrc);
+ } else if (rtcp_field_type == kRtcpXrDlrrCode) {
+ HandleDlrr(rtcp_parser);
+ }
+ rtcp_field_type = rtcp_parser->Iterate();
+ }
+}
+
+void RtcpReceiver::HandleRrtr(RtcpParser* rtcp_parser, uint32 remote_ssrc) {
+ if (remote_ssrc_ != remote_ssrc) {
+ // Not to us.
+ return;
+ }
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ RtcpReceiverReferenceTimeReport remote_time_report;
+ remote_time_report.remote_ssrc = remote_ssrc;
+ remote_time_report.ntp_seconds = rtcp_field.rrtr.ntp_most_significant;
+ remote_time_report.ntp_fraction = rtcp_field.rrtr.ntp_least_significant;
+
+ if (receiver_feedback_) {
+ receiver_feedback_->OnReceiverReferenceTimeReport(remote_time_report);
+ }
+}
+
+void RtcpReceiver::HandleDlrr(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ if (remote_ssrc_ != rtcp_field.dlrr.receivers_ssrc) {
+ // Not to us.
+ return;
+ }
+ if (rtt_feedback_) {
+ rtt_feedback_->OnReceivedDelaySinceLastReport(
+ rtcp_field.dlrr.receivers_ssrc,
+ rtcp_field.dlrr.last_receiver_report,
+ rtcp_field.dlrr.delay_last_receiver_report);
+ }
+}
+
+void RtcpReceiver::HandleNACK(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ if (ssrc_ != rtcp_field.nack.media_ssrc) {
+ // Not to us.
+ rtcp_parser->Iterate();
+ return;
+ }
+ std::list<uint16> nackSequenceNumbers;
+
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpGenericRtpFeedbackNackItemCode) {
+ HandleNACKItem(&rtcp_field, &nackSequenceNumbers);
+ field_type = rtcp_parser->Iterate();
+ }
+ if (sender_feedback_) {
+ sender_feedback_->OnReceivedNackRequest(nackSequenceNumbers);
+ }
+}
+
+void RtcpReceiver::HandleNACKItem(const RtcpField* rtcp_field,
+ std::list<uint16>* nack_sequence_numbers) {
+ nack_sequence_numbers->push_back(rtcp_field->nack_item.packet_id);
+
+ uint16 bitmask = rtcp_field->nack_item.bitmask;
+ if (bitmask) {
+ for (int i = 1; i <= 16; ++i) {
+ if (bitmask & 1) {
+ nack_sequence_numbers->push_back(rtcp_field->nack_item.packet_id + i);
+ }
+ bitmask = bitmask >> 1;
+ }
+ }
+}
+
+void RtcpReceiver::HandleBYE(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ uint32 remote_ssrc = rtcp_field.bye.sender_ssrc;
+ if (remote_ssrc_ == remote_ssrc) {
+ TRACE_EVENT_INSTANT1("cast_rtcp", "BYE", TRACE_EVENT_SCOPE_THREAD,
+ "remote_ssrc", remote_ssrc);
+ }
+ rtcp_parser->Iterate();
+}
+
+void RtcpReceiver::HandlePLI(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ if (ssrc_ == rtcp_field.pli.media_ssrc) {
+ // Received a signal that we need to send a new key frame.
+ if (sender_feedback_) {
+ sender_feedback_->OnReceivedIntraFrameRequest();
+ }
+ }
+ rtcp_parser->Iterate();
+}
+
+// Forwards an application-level "send us a report" request to the
+// receiver feedback interface, if one is registered.
+void RtcpReceiver::HandleSendReportRequest(RtcpParser* rtcp_parser) {
+  if (receiver_feedback_ != NULL) {
+    receiver_feedback_->OnReceivedSendReportRequest();
+  }
+  rtcp_parser->Iterate();
+}
+
+// Handles a Reference Picture Selection Indication: decodes the
+// 7-bits-per-byte native bit string into a picture id and forwards it to
+// the sender feedback interface.
+void RtcpReceiver::HandleRpsi(RtcpParser* rtcp_parser) {
+  const RtcpField& rtcp_field = rtcp_parser->Field();
+  if (rtcp_parser->Iterate() != kRtcpPayloadSpecificRpsiCode) {
+    return;
+  }
+  if (rtcp_field.rpsi.number_of_valid_bits == 0 ||
+      rtcp_field.rpsi.number_of_valid_bits % 8 != 0) {
+    // An empty or non byte-aligned bit string is invalid; ignore it.
+    // (The zero check fixes an out-of-bounds read: with zero valid bits
+    // |bytes| below would be 0 and native_bit_string[bytes - 1] would
+    // index element -1.)
+    return;
+  }
+  uint64 rpsi_picture_id = 0;
+
+  // Convert native_bit_string to rpsi_picture_id: seven payload bits per
+  // byte, most significant byte first.
+  uint8 bytes = rtcp_field.rpsi.number_of_valid_bits / 8;
+  for (uint8 n = 0; n < (bytes - 1); ++n) {
+    rpsi_picture_id += (rtcp_field.rpsi.native_bit_string[n] & 0x7f);
+    rpsi_picture_id <<= 7;  // Prepare next.
+  }
+  rpsi_picture_id += (rtcp_field.rpsi.native_bit_string[bytes - 1] & 0x7f);
+  if (sender_feedback_) {
+    sender_feedback_->OnReceivedRpsi(rtcp_field.rpsi.payload_type,
+                                     rpsi_picture_id);
+  }
+}
+
+// Dispatches payload-specific application-defined feedback (REMB or the
+// Cast-specific message) sent by the remote ssrc we track; anything from
+// another ssrc is skipped.
+void RtcpReceiver::HandlePayloadSpecificApp(RtcpParser* rtcp_parser) {
+  const RtcpField& rtcp_field = rtcp_parser->Field();
+  uint32 remote_ssrc = rtcp_field.application_specific.sender_ssrc;
+  if (remote_ssrc_ != remote_ssrc) {
+    // Message not to us.
+    rtcp_parser->Iterate();
+    return;
+  }
+
+  RtcpFieldTypes packet_type = rtcp_parser->Iterate();
+  switch (packet_type) {
+    case kRtcpPayloadSpecificRembCode:
+      packet_type = rtcp_parser->Iterate();
+      if (packet_type == kRtcpPayloadSpecificRembItemCode) {
+        HandlePayloadSpecificRembItem(rtcp_parser);
+        rtcp_parser->Iterate();
+      }
+      break;
+    case kRtcpPayloadSpecificCastCode:
+      packet_type = rtcp_parser->Iterate();
+      // NOTE(review): this compares against kRtcpPayloadSpecificCastCode
+      // again rather than a distinct *CastItem* code (unlike the REMB
+      // branch above) -- presumably the parser reports the same code for
+      // the item field; confirm against rtcp_utility.
+      if (packet_type == kRtcpPayloadSpecificCastCode) {
+        HandlePayloadSpecificCastItem(rtcp_parser);
+      }
+      break;
+    default:
+      return;
+  }
+}
+
+// Scans the REMB item's ssrc list for our own ssrc and, on a match,
+// reports the estimated bitrate to the sender feedback interface.
+void RtcpReceiver::HandlePayloadSpecificRembItem(RtcpParser* rtcp_parser) {
+  const RtcpField& field = rtcp_parser->Field();
+
+  const int num_ssrcs = field.remb_item.number_of_ssrcs;
+  for (int index = 0; index < num_ssrcs; ++index) {
+    if (field.remb_item.ssrcs[index] != ssrc_) {
+      continue;
+    }
+    // Found matching ssrc.
+    if (sender_feedback_) {
+      sender_feedback_->OnReceivedRemb(field.remb_item.bitrate);
+    }
+    return;
+  }
+}
+
+// Builds an RtcpCastMessage from a Cast feedback item: the acked frame id
+// plus any nack items that follow, then hands the message to the sender
+// feedback interface.
+void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
+  const RtcpField& rtcp_field = rtcp_parser->Field();
+
+  RtcpCastMessage cast_message(remote_ssrc_);
+  cast_message.ack_frame_id_ = rtcp_field.cast_item.last_frame_id;
+
+  RtcpFieldTypes packet_type = rtcp_parser->Iterate();
+  while (packet_type == kRtcpPayloadSpecificCastNackItemCode) {
+    // Re-fetch the parser's current field for each nack item.  The local
+    // is named |nack_field| so it no longer shadows |rtcp_field| above.
+    const RtcpField& nack_field = rtcp_parser->Field();
+    HandlePayloadSpecificCastNackItem(
+        &nack_field, &cast_message.missing_frames_and_packets_);
+    packet_type = rtcp_parser->Iterate();
+  }
+  if (sender_feedback_) {
+    sender_feedback_->OnReceivedCastFeedback(cast_message);
+  }
+}
+
+// Records one Cast nack item into |missing_frames_and_packets|, a map of
+// frame id -> set of missing packet ids.  An empty set for a frame means
+// the whole frame is missing (signaled by kRtcpCastAllPacketsLost).
+void RtcpReceiver::HandlePayloadSpecificCastNackItem(
+    const RtcpField* rtcp_field,
+    std::map<uint8, std::set<uint16> >* missing_frames_and_packets) {
+  // operator[] default-constructs the packet set on the first nack item
+  // for a frame; this replaces the original find/insert/pair dance and
+  // its redundant post-insert DCHECK with identical behavior.
+  std::set<uint16>& missing_packets =
+      (*missing_frames_and_packets)[rtcp_field->cast_nack_item.frame_id];
+
+  if (rtcp_field->cast_nack_item.packet_id == kRtcpCastAllPacketsLost) {
+    // Special case all packets in a frame is missing.
+    return;
+  }
+  uint16 packet_id = rtcp_field->cast_nack_item.packet_id;
+  uint8 bitmask = rtcp_field->cast_nack_item.bitmask;
+
+  missing_packets.insert(packet_id);
+
+  // Each set bit marks one more missing packet after packet_id.
+  if (bitmask) {
+    for (int i = 1; i <= 8; ++i) {
+      if (bitmask & 1) {
+        missing_packets.insert(packet_id + i);
+      }
+      bitmask = bitmask >> 1;
+    }
+  }
+}
+
+// Iterates over the Full Intra Request items and handles each in turn.
+void RtcpReceiver::HandleFIR(RtcpParser* rtcp_parser) {
+  const RtcpField& rtcp_field = rtcp_parser->Field();
+
+  for (RtcpFieldTypes field_type = rtcp_parser->Iterate();
+       field_type == kRtcpPayloadSpecificFirItemCode;
+       field_type = rtcp_parser->Iterate()) {
+    HandleFIRItem(&rtcp_field);
+  }
+}
+
+// Requests a new key frame if the FIR item addresses our sender ssrc.
+void RtcpReceiver::HandleFIRItem(const RtcpField* rtcp_field) {
+  // Is it our sender that is requested to generate a new keyframe.
+  if (rtcp_field->fir_item.ssrc != ssrc_) {
+    return;
+  }
+  if (sender_feedback_) {
+    sender_feedback_->OnReceivedIntraFrameRequest();
+  }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.h b/chromium/media/cast/rtcp/rtcp_receiver.h
new file mode 100644
index 00000000000..8c315d07b9d
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_receiver.h
@@ -0,0 +1,106 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
+#define MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
+
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+
+namespace media {
+namespace cast {
+
+// Callback interface for receiver-side RTCP events.  Implementations are
+// not owned by RtcpReceiver; they are only invoked by it.
+class RtcpReceiverFeedback {
+ public:
+  // Invoked when a sender report from the tracked remote ssrc arrives.
+  virtual void OnReceivedSenderReport(
+      const RtcpSenderInfo& remote_sender_info) = 0;
+
+  // Invoked for an XR receiver reference time report.
+  virtual void OnReceiverReferenceTimeReport(
+      const RtcpReceiverReferenceTimeReport& remote_time_report) = 0;
+
+  // Invoked when the remote end explicitly asks us to send a report.
+  virtual void OnReceivedSendReportRequest() = 0;
+
+  virtual ~RtcpReceiverFeedback() {}
+};
+
+// Callback interface delivering the (last report, delay since last
+// report) pairs extracted from DLRR/report blocks, which the caller can
+// use to compute round-trip time.
+class RtcpRttFeedback {
+ public:
+  virtual void OnReceivedDelaySinceLastReport(
+      uint32 receivers_ssrc,
+      uint32 last_report,
+      uint32 delay_since_last_report) = 0;
+
+  virtual ~RtcpRttFeedback() {}
+};
+
+// Parses incoming RTCP packets (via RtcpParser) and dispatches their
+// fields to the feedback interfaces above.  Fields are filtered on
+// |ssrc_| (our local ssrc) or |remote_ssrc_| depending on the field type.
+class RtcpReceiver {
+ public:
+  explicit RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+                        RtcpReceiverFeedback* receiver_feedback,
+                        RtcpRttFeedback* rtt_feedback,
+                        uint32 local_ssrc);
+  virtual ~RtcpReceiver();
+
+  // Sets the remote ssrc whose sender reports, BYE and application
+  // messages we should act upon; messages from other ssrcs are ignored.
+  void SetRemoteSSRC(uint32 ssrc);
+
+  // Parses |rtcp_parser| to completion, dispatching each field.
+  void IncomingRtcpPacket(RtcpParser* rtcp_parser);
+
+ private:
+  void HandleSenderReport(RtcpParser* rtcp_parser);
+
+  void HandleReceiverReport(RtcpParser* rtcp_parser);
+
+  void HandleReportBlock(const RtcpField* rtcp_field,
+                         uint32 remote_ssrc);
+
+  void HandleSDES(RtcpParser* rtcp_parser);
+  void HandleSDESChunk(RtcpParser* rtcp_parser);
+
+  void HandleBYE(RtcpParser* rtcp_parser);
+
+  // RTCP extended reports (RRTR / DLRR blocks).
+  void HandleXr(RtcpParser* rtcp_parser);
+  void HandleRrtr(RtcpParser* rtcp_parser, uint32 remote_ssrc);
+  void HandleDlrr(RtcpParser* rtcp_parser);
+
+  // Generic RTP Feedback.
+  void HandleNACK(RtcpParser* rtcp_parser);
+  void HandleNACKItem(const RtcpField* rtcp_field,
+                      std::list<uint16>* nack_sequence_numbers);
+
+  void HandleSendReportRequest(RtcpParser* rtcp_parser);
+
+  // Payload-specific.
+  void HandlePLI(RtcpParser* rtcp_parser);
+
+  void HandleSLI(RtcpParser* rtcp_parser);
+  void HandleSLIItem(RtcpField* rtcpPacket);
+
+  void HandleRpsi(RtcpParser* rtcp_parser);
+
+  void HandleFIR(RtcpParser* rtcp_parser);
+  void HandleFIRItem(const RtcpField* rtcp_field);
+
+  void HandlePayloadSpecificApp(RtcpParser* rtcp_parser);
+  void HandlePayloadSpecificRembItem(RtcpParser* rtcp_parser);
+  void HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser);
+  void HandlePayloadSpecificCastNackItem(
+      const RtcpField* rtcp_field,
+      std::map<uint8, std::set<uint16> >* missing_frames_and_packets);
+
+  const uint32 ssrc_;     // Our local ssrc; fixed for the object lifetime.
+  uint32 remote_ssrc_;    // Tracked remote ssrc, set via SetRemoteSSRC().
+
+  // Not owned by this class.
+  RtcpSenderFeedback* const sender_feedback_;
+  RtcpReceiverFeedback* const receiver_feedback_;
+  RtcpRttFeedback* const rtt_feedback_;
+
+  DISALLOW_COPY_AND_ASSIGN(RtcpReceiver);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
diff --git a/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
new file mode 100644
index 00000000000..5073944fa28
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
@@ -0,0 +1,380 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
+#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
+#include "media/cast/rtcp/rtcp_receiver.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+// Fixed ssrcs / CNAME used by the packet builder and expectations below.
+static const uint32 kSenderSsrc = 0x10203;
+static const uint32 kSourceSsrc = 0x40506;
+static const uint32 kUnknownSsrc = 0xDEAD;
+// NOTE(review): a file-scope static std::string runs a dynamic
+// initializer; the style guide prefers a const char[] here.
+static const std::string kCName("test@10.1.1.1");
+
+// RtcpSenderFeedback stub that verifies the exact Cast feedback contents
+// produced from TestRtcpPacketBuilder::AddCast: the acked frame, one
+// fully-lost frame and one frame with three individually lost packets.
+class SenderFeedbackCastVerification : public RtcpSenderFeedback {
+ public:
+  SenderFeedbackCastVerification() : called_(false) {}
+
+  // Unused callbacks.  (Fixed: dropped the stray ';' after each empty
+  // inline body, which is an extra-semicolon warning under -pedantic.)
+  virtual void OnReceivedReportBlock(
+      const RtcpReportBlock& report_block) OVERRIDE {}
+  virtual void OnReceivedIntraFrameRequest() OVERRIDE {}
+  virtual void OnReceivedRpsi(uint8 payload_type,
+                              uint64 picture_id) OVERRIDE {}
+  virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {}
+  virtual void OnReceivedNackRequest(
+      const std::list<uint16>& nack_sequence_numbers) OVERRIDE {}
+
+  virtual void OnReceivedCastFeedback(
+      const RtcpCastMessage& cast_feedback) OVERRIDE {
+    EXPECT_EQ(cast_feedback.media_ssrc_, kSenderSsrc);
+    EXPECT_EQ(cast_feedback.ack_frame_id_, kAckFrameId);
+
+    std::map<uint8, std::set<uint16> >::const_iterator frame_it =
+        cast_feedback.missing_frames_and_packets_.begin();
+
+    // First entry: a frame where every packet was lost (empty set).
+    EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
+    EXPECT_EQ(kLostFrameId, frame_it->first);
+    EXPECT_TRUE(frame_it->second.empty());
+    ++frame_it;
+    // Second entry: a frame with exactly three lost packets.
+    EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
+    EXPECT_EQ(kFrameIdWithLostPackets, frame_it->first);
+    EXPECT_EQ(3UL, frame_it->second.size());
+    std::set<uint16>::const_iterator packet_it = frame_it->second.begin();
+    EXPECT_EQ(kLostPacketId1, *packet_it);
+    ++packet_it;
+    EXPECT_EQ(kLostPacketId2, *packet_it);
+    ++packet_it;
+    EXPECT_EQ(kLostPacketId3, *packet_it);
+    ++frame_it;
+    EXPECT_EQ(frame_it, cast_feedback.missing_frames_and_packets_.end());
+    called_ = true;
+  }
+
+  // Whether OnReceivedCastFeedback was invoked (now const-correct).
+  bool called() const { return called_; }
+
+ private:
+  bool called_;
+};
+
+
+// Fixture wiring an RtcpReceiver to mock feedback objects.  SetUp pins
+// every callback to Times(0); individual tests override with the
+// expectations they need, so any unexpected dispatch fails the test.
+class RtcpReceiverTest : public ::testing::Test {
+ protected:
+  RtcpReceiverTest()
+      : rtcp_receiver_(new RtcpReceiver(&mock_sender_feedback_,
+                                        &mock_receiver_feedback_,
+                                        &mock_rtt_feedback_,
+                                        kSourceSsrc)) {
+  }
+
+  ~RtcpReceiverTest() {}
+
+  void SetUp() OVERRIDE {
+    // Default: no feedback callback may fire unless a test re-expects it.
+    EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
+    EXPECT_CALL(mock_receiver_feedback_,
+                OnReceiverReferenceTimeReport(_)).Times(0);
+    EXPECT_CALL(mock_receiver_feedback_,
+                OnReceivedSendReportRequest()).Times(0);
+
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
+    EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
+
+    EXPECT_CALL(mock_rtt_feedback_,
+                OnReceivedDelaySinceLastReport(_, _, _)).Times(0);
+
+    // Values the TestRtcpPacketBuilder encodes, used in equality matchers.
+    expected_sender_info_.ntp_seconds = kNtpHigh;
+    expected_sender_info_.ntp_fraction = kNtpLow;
+    expected_sender_info_.rtp_timestamp = kRtpTimestamp;
+    expected_sender_info_.send_packet_count = kSendPacketCount;
+    expected_sender_info_.send_octet_count = kSendOctetCount;
+
+    expected_report_block_.remote_ssrc = kSenderSsrc;
+    expected_report_block_.media_ssrc = kSourceSsrc;
+    expected_report_block_.fraction_lost = kLoss >> 24;
+    expected_report_block_.cumulative_lost = kLoss & 0xffffff;
+    expected_report_block_.extended_high_sequence_number = kExtendedMax;
+    expected_report_block_.jitter = kJitter;
+    expected_report_block_.last_sr = kLastSr;
+    expected_report_block_.delay_since_last_sr = kDelayLastSr;
+    expected_receiver_reference_report_.remote_ssrc = kSenderSsrc;
+    expected_receiver_reference_report_.ntp_seconds = kNtpHigh;
+    expected_receiver_reference_report_.ntp_fraction = kNtpLow;
+  }
+
+  // Injects an RTCP packet into the receiver.
+  void InjectRtcpPacket(const uint8* packet, uint16 length) {
+    RtcpParser rtcp_parser(packet, length);
+    rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
+  }
+
+  MockRtcpReceiverFeedback mock_receiver_feedback_;
+  MockRtcpRttFeedback mock_rtt_feedback_;
+  MockRtcpSenderFeedback mock_sender_feedback_;
+  scoped_ptr<RtcpReceiver> rtcp_receiver_;
+  RtcpSenderInfo expected_sender_info_;
+  RtcpReportBlock expected_report_block_;
+  RtcpReceiverReferenceTimeReport expected_receiver_reference_report_;
+};
+
+// A malformed packet must be dropped without any feedback callback firing
+// (all mocks expect zero calls from SetUp).
+TEST_F(RtcpReceiverTest, BrokenPacketIsIgnored) {
+  const uint8 bad_packet[] = {0, 0, 0, 0};
+  InjectRtcpPacket(bad_packet, sizeof(bad_packet));
+}
+
+// A sender report is only surfaced once the remote ssrc is configured.
+TEST_F(RtcpReceiverTest, InjectSenderReportPacket) {
+  TestRtcpPacketBuilder p;
+  p.AddSr(kSenderSsrc, 0);
+
+  // Expected to be ignored since the sender ssrc does not match our
+  // remote ssrc.
+  InjectRtcpPacket(p.Packet(), p.Length());
+
+  EXPECT_CALL(mock_receiver_feedback_,
+              OnReceivedSenderReport(expected_sender_info_)).Times(1);
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  // Expected to pass through since the sender ssrc matches our remote ssrc.
+  InjectRtcpPacket(p.Packet(), p.Length());
+}
+
+// Report blocks are matched on the block's media ssrc against our local
+// ssrc; a block for an unknown ssrc is dropped.
+TEST_F(RtcpReceiverTest, InjectReceiveReportPacket) {
+  TestRtcpPacketBuilder p1;
+  p1.AddRr(kSenderSsrc, 1);
+  p1.AddRb(kUnknownSsrc);
+
+  // Expected to be ignored since the source ssrc does not match our
+  // local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+
+  TestRtcpPacketBuilder p2;
+  p2.AddRr(kSenderSsrc, 1);
+  p2.AddRb(kSourceSsrc);
+
+  // Expected to pass through since the report block ssrc matches our
+  // local ssrc.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+}
+
+// Exercises all four combinations of (sender report matched / ignored) x
+// (report block matched / ignored) for a compound SR+RB packet.
+TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
+  TestRtcpPacketBuilder p1;
+  p1.AddSr(kSenderSsrc, 1);
+  p1.AddRb(kUnknownSsrc);
+
+  // Sender report expected to be ignored since the sender ssrc does not match
+  // our remote ssrc.
+  // Report block expected to be ignored since the source ssrc does not match
+  // our local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_receiver_feedback_,
+              OnReceivedSenderReport(expected_sender_info_)).Times(1);
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  // Sender report expected to pass through since the sender ssrc matches our
+  // remote ssrc.
+  // Report block expected to be ignored since the source ssrc does not match
+  // our local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+
+  rtcp_receiver_->SetRemoteSSRC(0);
+
+  TestRtcpPacketBuilder p2;
+  p2.AddSr(kSenderSsrc, 1);
+  p2.AddRb(kSourceSsrc);
+
+  // Sender report expected to be ignored since the sender ssrc does not match
+  // our remote ssrc.
+  // Report block expected to pass through since its media ssrc matches
+  // our local ssrc.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+
+  EXPECT_CALL(mock_receiver_feedback_,
+              OnReceivedSenderReport(expected_sender_info_)).Times(1);
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  // Sender report expected to pass through since the sender ssrc matches our
+  // remote ssrc.
+  // Report block expected to pass through since its media ssrc matches
+  // our local ssrc.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+}
+
+// DLRR sub-blocks are matched against the configured remote ssrc (see
+// HandleDlrr), so nothing is reported until SetRemoteSSRC(kSenderSsrc).
+// Unknown XR blocks in the same packet must be skipped cleanly.
+TEST_F(RtcpReceiverTest, InjectSenderReportPacketWithDlrr) {
+  TestRtcpPacketBuilder p;
+  p.AddSr(kSenderSsrc, 0);
+  p.AddXrHeader(kSenderSsrc);
+  p.AddXrUnknownBlock();
+  p.AddXrExtendedDlrrBlock(kSenderSsrc);
+  p.AddXrUnknownBlock();
+  p.AddSdesCname(kSenderSsrc, kCName);
+
+  // Expected to be ignored since the remote ssrc has not been set yet.
+  InjectRtcpPacket(p.Packet(), p.Length());
+
+  EXPECT_CALL(mock_receiver_feedback_,
+              OnReceivedSenderReport(expected_sender_info_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSenderSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+
+  // Enable receiving sender report.
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  // Expected to pass through since the sender ssrc matches our remote ssrc.
+  InjectRtcpPacket(p.Packet(), p.Length());
+}
+
+// An RRTR block is only surfaced once the remote ssrc is configured;
+// the accompanying report block follows the local-ssrc matching rule.
+TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
+  TestRtcpPacketBuilder p1;
+  p1.AddRr(kSenderSsrc, 1);
+  p1.AddRb(kUnknownSsrc);
+  p1.AddXrHeader(kSenderSsrc);
+  p1.AddXrRrtrBlock();
+
+  // Expected to be ignored since the source ssrc does not match our
+  // local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+  EXPECT_CALL(mock_receiver_feedback_, OnReceiverReferenceTimeReport(
+      expected_receiver_reference_report_)).Times(1);
+
+  // Enable receiving reference time report.
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  TestRtcpPacketBuilder p2;
+  p2.AddRr(kSenderSsrc, 1);
+  p2.AddRb(kSourceSsrc);
+  p2.AddXrHeader(kSenderSsrc);
+  p2.AddXrRrtrBlock();
+
+  // Expected to pass through since the report block matches our local ssrc.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+}
+
+// A PLI targeting our local ssrc must trigger an intra-frame request;
+// one targeting an unknown ssrc must be dropped.
+TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
+  TestRtcpPacketBuilder p1;
+  p1.AddRr(kSenderSsrc, 1);
+  p1.AddRb(kUnknownSsrc);
+  p1.AddPli(kSenderSsrc, kUnknownSsrc);
+
+  // Expected to be ignored since the source ssrc does not match our
+  // local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+  EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
+
+  TestRtcpPacketBuilder p2;
+  p2.AddRr(kSenderSsrc, 1);
+  p2.AddRb(kSourceSsrc);
+  p2.AddPli(kSenderSsrc, kSourceSsrc);
+
+  // Expected to pass through since the targeted ssrc matches our local ssrc.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+}
+
+// Cast feedback is dispatched only when the app message comes from the
+// configured remote ssrc.
+TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
+  TestRtcpPacketBuilder p1;
+  p1.AddRr(kSenderSsrc, 1);
+  p1.AddRb(kUnknownSsrc);
+  p1.AddCast(kSenderSsrc, kUnknownSsrc);
+
+  // Expected to be ignored since the source ssrc does not match our
+  // local ssrc.
+  InjectRtcpPacket(p1.Packet(), p1.Length());
+
+  EXPECT_CALL(mock_sender_feedback_,
+              OnReceivedReportBlock(expected_report_block_)).Times(1);
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+  EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(1);
+
+  // Enable receiving the cast feedback.
+  rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
+
+  TestRtcpPacketBuilder p2;
+  p2.AddRr(kSenderSsrc, 1);
+  p2.AddRb(kSourceSsrc);
+  p2.AddCast(kSenderSsrc, kSourceSsrc);
+
+  // Expected to pass through since the ssrcs match.
+  InjectRtcpPacket(p2.Packet(), p2.Length());
+}
+
+// Uses SenderFeedbackCastVerification (above) to check the full decoded
+// contents of a Cast feedback message, not just that the callback fired.
+TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
+  SenderFeedbackCastVerification sender_feedback_cast_verification;
+  RtcpReceiver rtcp_receiver(&sender_feedback_cast_verification,
+                             &mock_receiver_feedback_,
+                             &mock_rtt_feedback_,
+                             kSourceSsrc);
+
+  EXPECT_CALL(mock_rtt_feedback_,
+              OnReceivedDelaySinceLastReport(kSourceSsrc,
+                                             kLastSr,
+                                             kDelayLastSr)).Times(1);
+
+  // Enable receiving the cast feedback.
+  rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+  TestRtcpPacketBuilder p;
+  p.AddRr(kSenderSsrc, 1);
+  p.AddRb(kSourceSsrc);
+  p.AddCast(kSenderSsrc, kSourceSsrc);
+
+  // Expected to pass through since the ssrcs match.
+  RtcpParser rtcp_parser(p.Packet(), p.Length());
+  rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+  EXPECT_TRUE(sender_feedback_cast_verification.called());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_sender.cc b/chromium/media/cast/rtcp/rtcp_sender.cc
new file mode 100644
index 00000000000..89ea05e0531
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_sender.cc
@@ -0,0 +1,544 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/rtcp_sender.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+// Maximum number of NACK fields (sequence number + bitmask pairs) packed
+// into one NACK packet.
+static const int kRtcpMaxNackFields = 253;
+// Maximum number of loss fields in one Cast feedback message.
+static const int kRtcpMaxCastLossFields = 100;
+
+// |outgoing_transport| is not owned; presumably it outlives this object --
+// confirm with callers.  |c_name| must fit the RTCP CNAME size limit,
+// which is DCHECKed here (BuildSdec writes it unconditionally).
+RtcpSender::RtcpSender(PacedPacketSender* outgoing_transport,
+                       uint32 sending_ssrc,
+                       const std::string& c_name)
+    : ssrc_(sending_ssrc),
+      c_name_(c_name),
+      transport_(outgoing_transport) {
+  DCHECK_LT(c_name_.length(), kRtcpCnameSize) << "Invalid config";
+}
+
+RtcpSender::~RtcpSender() {}
+
+// Builds one compound RTCP packet containing the blocks selected in
+// |packet_type_flags| and hands it to the paced transport.  Each pointer
+// argument is only dereferenced (and DCHECKed) when its flag is set.
+void RtcpSender::SendRtcp(uint32 packet_type_flags,
+                          const RtcpSenderInfo* sender_info,
+                          const RtcpReportBlock* report_block,
+                          uint32 pli_remote_ssrc,
+                          const RtcpDlrrReportBlock* dlrr,
+                          const RtcpReceiverReferenceTimeReport* rrtr,
+                          const RtcpCastMessage* cast_message) {
+  std::vector<uint8> packet;
+  packet.reserve(kIpPacketSize);
+  // A compound packet starts with SR or RR; the SDES CNAME follows it.
+  if (packet_type_flags & kRtcpSr) {
+    DCHECK(sender_info) << "Invalid argument";
+    BuildSR(*sender_info, report_block, &packet);
+    BuildSdec(&packet);
+  } else if (packet_type_flags & kRtcpRr) {
+    BuildRR(report_block, &packet);
+    if (!c_name_.empty()) {
+      BuildSdec(&packet);
+    }
+  }
+  if (packet_type_flags & kRtcpPli) {
+    BuildPli(pli_remote_ssrc, &packet);
+  }
+  if (packet_type_flags & kRtcpBye) {
+    BuildBye(&packet);
+  }
+  if (packet_type_flags & kRtcpRpsi) {
+    // Implement this for webrtc interop.
+    NOTIMPLEMENTED();
+  }
+  if (packet_type_flags & kRtcpRemb) {
+    // Implement this for webrtc interop.
+    NOTIMPLEMENTED();
+  }
+  if (packet_type_flags & kRtcpNack) {
+    // Implement this for webrtc interop.
+    NOTIMPLEMENTED();
+  }
+  if (packet_type_flags & kRtcpDlrr) {
+    DCHECK(dlrr) << "Invalid argument";
+    BuildDlrrRb(dlrr, &packet);
+  }
+  if (packet_type_flags & kRtcpRrtr) {
+    DCHECK(rrtr) << "Invalid argument";
+    BuildRrtr(rrtr, &packet);
+  }
+  if (packet_type_flags & kRtcpCast) {
+    DCHECK(cast_message) << "Invalid argument";
+    BuildCast(cast_message, &packet);
+  }
+
+  if (packet.empty()) return;  // Sanity don't send empty packets.
+
+  transport_->SendRtcpPacket(packet);
+}
+
+// Appends an RTCP Sender Report (PT=200), optionally followed by a
+// single report block (24 extra bytes via AddReportBlocks).
+void RtcpSender::BuildSR(const RtcpSenderInfo& sender_info,
+                         const RtcpReportBlock* report_block,
+                         std::vector<uint8>* packet) const {
+  // Sender report.
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + 52, kIpPacketSize) << "Not enough buffer space";
+  if (start_size + 52 > kIpPacketSize) return;
+
+  // RTCP length field: total 32-bit words minus one (28 bytes -> 6;
+  // 52 bytes with a report block -> 12).
+  uint16 number_of_rows = (report_block) ? 12 : 6;
+  packet->resize(start_size + 28);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 28);
+  big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
+  big_endian_writer.WriteU8(200);
+  big_endian_writer.WriteU16(number_of_rows);
+  big_endian_writer.WriteU32(ssrc_);
+  big_endian_writer.WriteU32(sender_info.ntp_seconds);
+  big_endian_writer.WriteU32(sender_info.ntp_fraction);
+  big_endian_writer.WriteU32(sender_info.rtp_timestamp);
+  big_endian_writer.WriteU32(sender_info.send_packet_count);
+  big_endian_writer.WriteU32(sender_info.send_octet_count);
+
+  if (report_block) {
+    AddReportBlocks(*report_block, packet);  // Adds 24 bytes.
+  }
+}
+
+// Appends an RTCP Receiver Report (PT=201), optionally with one report
+// block.  Length field: 8 bytes -> 1; 32 bytes with a report block -> 7.
+void RtcpSender::BuildRR(const RtcpReportBlock* report_block,
+                         std::vector<uint8>* packet) const {
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + 32, kIpPacketSize) << "Not enough buffer space";
+  if (start_size + 32 > kIpPacketSize) return;
+
+  uint16 number_of_rows = (report_block) ? 7 : 1;
+  packet->resize(start_size + 8);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
+  big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
+  big_endian_writer.WriteU8(201);
+  big_endian_writer.WriteU16(number_of_rows);
+  big_endian_writer.WriteU32(ssrc_);
+
+  if (report_block) {
+    AddReportBlocks(*report_block, packet);  // Adds 24 bytes.
+  }
+}
+
+// Appends one 24-byte report block (loss stats, jitter, LSR/DLSR) to an
+// SR or RR already present in |packet|.
+void RtcpSender::AddReportBlocks(const RtcpReportBlock& report_block,
+                                 std::vector<uint8>* packet) const {
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + 24, kIpPacketSize) << "Not enough buffer space";
+  if (start_size + 24 > kIpPacketSize) return;
+
+  packet->resize(start_size + 24);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
+  big_endian_writer.WriteU32(report_block.media_ssrc);
+  big_endian_writer.WriteU8(report_block.fraction_lost);
+  // Cumulative lost is a 24-bit big-endian value, written byte by byte.
+  big_endian_writer.WriteU8(report_block.cumulative_lost >> 16);
+  big_endian_writer.WriteU8(report_block.cumulative_lost >> 8);
+  big_endian_writer.WriteU8(report_block.cumulative_lost);
+
+  // Extended highest seq_no, contain the highest sequence number received.
+  big_endian_writer.WriteU32(report_block.extended_high_sequence_number);
+  big_endian_writer.WriteU32(report_block.jitter);
+
+  // Last SR timestamp; our NTP time when we received the last report.
+  // This is the value that we read from the send report packet not when we
+  // received it.
+  big_endian_writer.WriteU32(report_block.last_sr);
+
+  // Delay since last received report, time since we received the report.
+  big_endian_writer.WriteU32(report_block.delay_since_last_sr);
+}
+
+// Appends an SDES block (PT=202) carrying a single CNAME item for our
+// ssrc, zero-padded to a 32-bit boundary with at least one terminating
+// zero byte.
+// NOTE(review): "Sdec" looks like a typo for SDES; renaming would touch
+// the header, so it is left as-is here.
+void RtcpSender::BuildSdec(std::vector<uint8>* packet) const {
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + 12 + c_name_.length(), kIpPacketSize)
+      << "Not enough buffer space";
+  if (start_size + 12 > kIpPacketSize) return;
+
+  // SDES Source Description.
+  packet->resize(start_size + 10);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 10);
+  // We always need to add one SDES CNAME.
+  big_endian_writer.WriteU8(0x80 + 1);
+  big_endian_writer.WriteU8(202);
+
+  // Handle SDES length later on.
+  // (This is the low byte of the 16-bit length field; the high byte is
+  // already zero from the WriteU16(0) below.)
+  uint32 sdes_length_position = start_size + 3;
+  big_endian_writer.WriteU16(0);
+  big_endian_writer.WriteU32(ssrc_);  // Add our own SSRC.
+  big_endian_writer.WriteU8(1);  // CNAME = 1
+  big_endian_writer.WriteU8(static_cast<uint8>(c_name_.length()));
+
+  size_t sdes_length = 10 + c_name_.length();
+  packet->insert(packet->end(), c_name_.c_str(),
+                 c_name_.c_str() + c_name_.length());
+
+  size_t padding = 0;
+
+  // We must have a zero field even if we have an even multiple of 4 bytes.
+  if ((packet->size() % 4) == 0) {
+    padding++;
+    packet->push_back(0);
+  }
+  while ((packet->size() % 4) != 0) {
+    padding++;
+    packet->push_back(0);
+  }
+  sdes_length += padding;
+
+  // In 32-bit words minus one and we don't count the header.
+  uint8 buffer_length = (sdes_length / 4) - 1;
+  (*packet)[sdes_length_position] = buffer_length;
+}
+
+// Appends a Picture Loss Indication (PT=206, FMT=1) asking |remote_ssrc|
+// for a new key frame.
+void RtcpSender::BuildPli(uint32 remote_ssrc,
+                          std::vector<uint8>* packet) const {
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + 12, kIpPacketSize) << "Not enough buffer space";
+  if (start_size + 12 > kIpPacketSize) return;
+
+  packet->resize(start_size + 12);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 12);
+  uint8 FMT = 1;  // Picture loss indicator.
+  big_endian_writer.WriteU8(0x80 + FMT);
+  big_endian_writer.WriteU8(206);
+  big_endian_writer.WriteU16(2);  // Used fixed length of 2.
+  big_endian_writer.WriteU32(ssrc_);  // Add our own SSRC.
+  big_endian_writer.WriteU32(remote_ssrc);  // Add the remote SSRC.
+  TRACE_EVENT_INSTANT2("cast_rtcp", "RtcpSender::PLI", TRACE_EVENT_SCOPE_THREAD,
+                       "remote_ssrc", remote_ssrc,
+                       "ssrc", ssrc_);
+}
+
+/*
+    0                   1                   2                   3
+    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |      PB       |0| Payload Type|    Native Rpsi bit string     |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |   defined per codec          ...                | Padding (0) |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+// Appends a Reference Picture Selection Indication (PT=206, FMT=3).
+// Fix over the original: the packet is now sized from the computed
+// length field ((size + 1) 32-bit words = 16/20/24 bytes) instead of a
+// fixed 24 bytes, so short picture ids no longer append up to 8
+// uninitialized trailing bytes that disagree with the declared length.
+void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi,
+                           std::vector<uint8>* packet) const {
+  // Number of bytes needed for the 7-bits-per-byte encoded picture id;
+  // the total packet size depends on it, so compute it first.
+  uint32 bits_required = 7;
+  uint8 bytes_required = 1;
+  while ((rpsi->picture_id >> bits_required) > 0) {
+    bits_required += 7;
+    bytes_required++;
+  }
+  // RTCP length field in 32-bit words minus one: 3, 4 or 5.
+  uint8 size = 3;
+  if (bytes_required > 6) {
+    size = 5;
+  } else if (bytes_required > 2) {
+    size = 4;
+  }
+  size_t rpsi_size = (size + 1) * 4;
+
+  size_t start_size = packet->size();
+  DCHECK_LT(start_size + rpsi_size, kIpPacketSize)
+      << "Not enough buffer space";
+  if (start_size + rpsi_size > kIpPacketSize) return;
+
+  packet->resize(start_size + rpsi_size);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), rpsi_size);
+  uint8 FMT = 3;  // Reference Picture Selection Indication.
+  big_endian_writer.WriteU8(0x80 + FMT);
+  big_endian_writer.WriteU8(206);
+  big_endian_writer.WriteU8(0);
+  big_endian_writer.WriteU8(size);
+  big_endian_writer.WriteU32(ssrc_);
+  big_endian_writer.WriteU32(rpsi->remote_ssrc);
+
+  uint8 padding_bytes = 4 - ((2 + bytes_required) % 4);
+  if (padding_bytes == 4) {
+    padding_bytes = 0;
+  }
+  // Add padding length in bits, padding can be 0, 8, 16 or 24.
+  big_endian_writer.WriteU8(padding_bytes * 8);
+  big_endian_writer.WriteU8(rpsi->payload_type);
+
+  // Add picture ID, continuation bit set on all but the last byte.
+  for (int i = bytes_required - 1; i > 0; i--) {
+    big_endian_writer.WriteU8(
+        0x80 | static_cast<uint8>(rpsi->picture_id >> (i * 7)));
+  }
+  // Add last byte of picture ID.
+  big_endian_writer.WriteU8(static_cast<uint8>(rpsi->picture_id & 0x7f));
+
+  // Add padding.
+  for (int j = 0; j < padding_bytes; ++j) {
+    big_endian_writer.WriteU8(0);
+  }
+}
+
+// Appends a REMB (Receiver Estimated Max Bitrate) application feedback
+// packet: 20 fixed bytes plus 4 per listed ssrc; the length field
+// (ssrcs + 4) matches that total in 32-bit words minus one.
+void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
+                           std::vector<uint8>* packet) const {
+  size_t start_size = packet->size();
+  size_t remb_size = 20 + 4 * remb->remb_ssrcs.size();
+  DCHECK_LT(start_size + remb_size, kIpPacketSize)
+      << "Not enough buffer space";
+  if (start_size + remb_size > kIpPacketSize) return;
+
+  packet->resize(start_size + remb_size);
+
+  net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), remb_size);
+
+  // Add application layer feedback.
+  uint8 FMT = 15;
+  big_endian_writer.WriteU8(0x80 + FMT);
+  big_endian_writer.WriteU8(206);
+  big_endian_writer.WriteU8(0);
+  big_endian_writer.WriteU8(remb->remb_ssrcs.size() + 4);
+  big_endian_writer.WriteU32(ssrc_);  // Add our own SSRC.
+  big_endian_writer.WriteU32(0);  // Remote SSRC must be 0.
+  big_endian_writer.WriteU32(kRemb);
+  big_endian_writer.WriteU8(remb->remb_ssrcs.size());
+
+  // 6 bit exponent and a 18 bit mantissa.
+  uint8 bitrate_exponent;
+  uint32 bitrate_mantissa;
+  BitrateToRembExponentBitrate(remb->remb_bitrate,
+                               &bitrate_exponent,
+                               &bitrate_mantissa);
+
+  big_endian_writer.WriteU8(static_cast<uint8>((bitrate_exponent << 2) +
+      ((bitrate_mantissa >> 16) & 0x03)));
+  big_endian_writer.WriteU8(static_cast<uint8>(bitrate_mantissa >> 8));
+  big_endian_writer.WriteU8(static_cast<uint8>(bitrate_mantissa));
+
+  std::list<uint32>::const_iterator it = remb->remb_ssrcs.begin();
+  for (; it != remb->remb_ssrcs.end(); ++it) {
+    big_endian_writer.WriteU32(*it);
+  }
+  TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::RembBitrate", ssrc_,
+                    remb->remb_bitrate);
+}
+
+void RtcpSender::BuildNack(const RtcpNackMessage* nack,
+ std::vector<uint8>* packet) const {
+ size_t start_size = packet->size();
+ DCHECK_LT(start_size + 16, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 16 > kIpPacketSize) return;
+
+ packet->resize(start_size + 16);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 16);
+
+ uint8 FMT = 1;
+ big_endian_writer.WriteU8(0x80 + FMT);
+ big_endian_writer.WriteU8(205);
+ big_endian_writer.WriteU8(0);
+ int nack_size_pos = start_size + 3;
+ big_endian_writer.WriteU8(3);
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(nack->remote_ssrc); // Add the remote SSRC.
+
+ // Build NACK bitmasks and write them to the Rtcp message.
+ // The nack list should be sorted and not contain duplicates.
+ int number_of_nack_fields = 0;
+ int max_number_of_nack_fields =
+ std::min<int>(kRtcpMaxNackFields, (kIpPacketSize - packet->size()) / 4);
+
+ std::list<uint16>::const_iterator it = nack->nack_list.begin();
+ while (it != nack->nack_list.end() &&
+ number_of_nack_fields < max_number_of_nack_fields) {
+ uint16 nack_sequence_number = *it;
+ uint16 bitmask = 0;
+ ++it;
+ while (it != nack->nack_list.end()) {
+ int shift = static_cast<uint16>(*it - nack_sequence_number) - 1;
+ if (shift >= 0 && shift <= 15) {
+ bitmask |= (1 << shift);
+ ++it;
+ } else {
+ break;
+ }
+ }
+ // Write the sequence number and the bitmask to the packet.
+ start_size = packet->size();
+ DCHECK_LT(start_size + 4, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 4 > kIpPacketSize) return;
+
+ packet->resize(start_size + 4);
+ net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
+ big_endian_nack_writer.WriteU16(nack_sequence_number);
+ big_endian_nack_writer.WriteU16(bitmask);
+ number_of_nack_fields++;
+ }
+ (*packet)[nack_size_pos] = static_cast<uint8>(2 + number_of_nack_fields);
+ TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::NACK", ssrc_,
+ nack->nack_list.size());
+}
+
+void RtcpSender::BuildBye(std::vector<uint8>* packet) const {
+ size_t start_size = packet->size();
+ DCHECK_LT(start_size + 8, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 8 > kIpPacketSize) return;
+
+ packet->resize(start_size + 8);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
+ big_endian_writer.WriteU8(0x80 + 1);
+ big_endian_writer.WriteU8(203);
+ big_endian_writer.WriteU16(1); // Length.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+}
+
+/*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |V=2|P|reserved | PT=XR=207 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | BT=5 | reserved | block length |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ | SSRC_1 (SSRC of first receiver) | sub-
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+ | last RR (LRR) | 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | delay since last RR (DLRR) |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+*/
+void RtcpSender::BuildDlrrRb(const RtcpDlrrReportBlock* dlrr,
+ std::vector<uint8>* packet) const {
+ size_t start_size = packet->size();
+ DCHECK_LT(start_size + 24, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 24 > kIpPacketSize) return;
+
+ packet->resize(start_size + 24);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
+ big_endian_writer.WriteU8(0x80);
+ big_endian_writer.WriteU8(207);
+ big_endian_writer.WriteU16(5); // Length.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU8(5); // Add block type.
+ big_endian_writer.WriteU8(0); // Add reserved.
+ big_endian_writer.WriteU16(3); // Block length.
+ big_endian_writer.WriteU32(ssrc_); // Add the media (received RTP) SSRC.
+ big_endian_writer.WriteU32(dlrr->last_rr);
+ big_endian_writer.WriteU32(dlrr->delay_since_last_rr);
+}
+
+void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
+ std::vector<uint8>* packet) const {
+ size_t start_size = packet->size();
+ DCHECK_LT(start_size + 20, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 20 > kIpPacketSize) return;
+
+ packet->resize(start_size + 20);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
+
+ big_endian_writer.WriteU8(0x80);
+ big_endian_writer.WriteU8(207);
+ big_endian_writer.WriteU16(4); // Length.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU8(4); // Add block type.
+ big_endian_writer.WriteU8(0); // Add reserved.
+ big_endian_writer.WriteU16(2); // Block length.
+
+ // Add the media (received RTP) SSRC.
+ big_endian_writer.WriteU32(rrtr->ntp_seconds);
+ big_endian_writer.WriteU32(rrtr->ntp_fraction);
+}
+
+void RtcpSender::BuildCast(const RtcpCastMessage* cast,
+ std::vector<uint8>* packet) const {
+ size_t start_size = packet->size();
+ DCHECK_LT(start_size + 20, kIpPacketSize) << "Not enough buffer space";
+ if (start_size + 20 > kIpPacketSize) return;
+
+ packet->resize(start_size + 20);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
+ uint8 FMT = 15; // Application layer feedback.
+ big_endian_writer.WriteU8(0x80 + FMT);
+ big_endian_writer.WriteU8(206);
+ big_endian_writer.WriteU8(0);
+ int cast_size_pos = start_size + 3; // Save length position.
+ big_endian_writer.WriteU8(4);
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(cast->media_ssrc_); // Remote SSRC.
+ big_endian_writer.WriteU32(kCast);
+ big_endian_writer.WriteU8(cast->ack_frame_id_);
+ int cast_loss_field_pos = start_size + 17; // Save loss field position.
+ big_endian_writer.WriteU8(0); // Overwritten with number_of_loss_fields.
+ big_endian_writer.WriteU8(0); // Reserved.
+ big_endian_writer.WriteU8(0); // Reserved.
+
+ int number_of_loss_fields = 0;
+ int max_number_of_loss_fields = std::min<int>(kRtcpMaxCastLossFields,
+ (kIpPacketSize - packet->size()) / 4);
+
+ std::map<uint8, std::set<uint16> >::const_iterator frame_it =
+ cast->missing_frames_and_packets_.begin();
+
+ for (; frame_it != cast->missing_frames_and_packets_.end() &&
+ number_of_loss_fields < max_number_of_loss_fields; ++frame_it) {
+ // Iterate through all frames with missing packets.
+ if (frame_it->second.empty()) {
+ // Special case all packets in a frame is missing.
+ start_size = packet->size();
+ packet->resize(start_size + 4);
+ net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
+ big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU16(kRtcpCastAllPacketsLost);
+ big_endian_nack_writer.WriteU8(0);
+ ++number_of_loss_fields;
+ } else {
+ std::set<uint16>::const_iterator packet_it = frame_it->second.begin();
+ while (packet_it != frame_it->second.end()) {
+ uint16 packet_id = *packet_it;
+
+ start_size = packet->size();
+ packet->resize(start_size + 4);
+ net::BigEndianWriter big_endian_nack_writer(
+ &((*packet)[start_size]), 4);
+
+ // Write frame and packet id to buffer before calculating bitmask.
+ big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU16(packet_id);
+
+ uint8 bitmask = 0;
+ ++packet_it;
+ while (packet_it != frame_it->second.end()) {
+ int shift = static_cast<uint8>(*packet_it - packet_id) - 1;
+ if (shift >= 0 && shift <= 7) {
+ bitmask |= (1 << shift);
+ ++packet_it;
+ } else {
+ break;
+ }
+ }
+ big_endian_nack_writer.WriteU8(bitmask);
+ ++number_of_loss_fields;
+ }
+ }
+ }
+ (*packet)[cast_size_pos] = static_cast<uint8>(4 + number_of_loss_fields);
+ (*packet)[cast_loss_field_pos] = static_cast<uint8>(number_of_loss_fields);
+
+ // Frames with missing packets.
+ TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::CastNACK", ssrc_,
+ cast->missing_frames_and_packets_.size());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_sender.h b/chromium/media/cast/rtcp/rtcp_sender.h
new file mode 100644
index 00000000000..7dbbc0f95b5
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_sender.h
@@ -0,0 +1,111 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RTCP_SENDER_H_
+#define MEDIA_CAST_RTCP_RTCP_SENDER_H_
+
+#include <list>
+#include <string>
+
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtcpSender {
+ public:
+ RtcpSender(PacedPacketSender* const paced_packet_sender,
+ uint32 sending_ssrc,
+ const std::string& c_name);
+
+ virtual ~RtcpSender();
+
+ void SendRtcp(uint32 packet_type_flags,
+ const RtcpSenderInfo* sender_info,
+ const RtcpReportBlock* report_block,
+ uint32 pli_remote_ssrc,
+ const RtcpDlrrReportBlock* dlrr,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message);
+
+ enum RtcpPacketType {
+ kRtcpSr = 0x0002,
+ kRtcpRr = 0x0004,
+ kRtcpBye = 0x0008,
+ kRtcpPli = 0x0010,
+ kRtcpNack = 0x0020,
+ kRtcpFir = 0x0040,
+ kRtcpSrReq = 0x0200,
+ kRtcpDlrr = 0x0400,
+ kRtcpRrtr = 0x0800,
+ kRtcpRpsi = 0x8000,
+ kRtcpRemb = 0x10000,
+ kRtcpCast = 0x20000,
+ };
+
+ private:
+ void BuildSR(const RtcpSenderInfo& sender_info,
+ const RtcpReportBlock* report_block,
+ std::vector<uint8>* packet) const;
+
+ void BuildRR(const RtcpReportBlock* report_block,
+ std::vector<uint8>* packet) const;
+
+ void AddReportBlocks(const RtcpReportBlock& report_block,
+ std::vector<uint8>* packet) const;
+
+ void BuildSdec(std::vector<uint8>* packet) const;
+
+ void BuildPli(uint32 remote_ssrc,
+ std::vector<uint8>* packet) const;
+
+ void BuildRemb(const RtcpRembMessage* remb,
+ std::vector<uint8>* packet) const;
+
+ void BuildRpsi(const RtcpRpsiMessage* rpsi,
+ std::vector<uint8>* packet) const;
+
+ void BuildNack(const RtcpNackMessage* nack,
+ std::vector<uint8>* packet) const;
+
+ void BuildBye(std::vector<uint8>* packet) const;
+
+ void BuildDlrrRb(const RtcpDlrrReportBlock* dlrr,
+ std::vector<uint8>* packet) const;
+
+ void BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
+ std::vector<uint8>* packet) const;
+
+ void BuildCast(const RtcpCastMessage* cast_message,
+ std::vector<uint8>* packet) const;
+
+ inline void BitrateToRembExponentBitrate(uint32 bitrate,
+ uint8* exponent,
+ uint32* mantissa) const {
+ // 6 bit exponent and a 18 bit mantissa.
+ *exponent = 0;
+ for (int i = 0; i < 64; ++i) {
+ if (bitrate <= (262143u << i)) {
+ *exponent = i;
+ break;
+ }
+ }
+ *mantissa = (bitrate >> *exponent);
+ }
+
+ const uint32 ssrc_;
+ const std::string c_name_;
+
+ // Not owned by this class.
+ PacedPacketSender* transport_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpSender);
+};
+
+} // namespace cast
+} // namespace media
+#endif // MEDIA_CAST_RTCP_RTCP_SENDER_H_
diff --git a/chromium/media/cast/rtcp/rtcp_sender_unittest.cc b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
new file mode 100644
index 00000000000..b7daf37719f
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
@@ -0,0 +1,285 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/rtcp/rtcp_sender.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+static const int kRtcpInterval = 1000;
+static const uint32 kSendingSsrc = 0x12345678;
+static const uint32 kMediaSsrc = 0x87654321;
+static const std::string kCName("test@10.1.1.1");
+
+class TestRtcpTransport : public PacedPacketSender {
+ public:
+ TestRtcpTransport()
+ : expected_packet_length_(0),
+ packet_count_(0) {
+ }
+
+ virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
+ EXPECT_EQ(expected_packet_length_, packet.size());
+ EXPECT_EQ(0, memcmp(expected_packet_, &(packet[0]), packet.size()));
+ packet_count_++;
+ return true;
+ }
+
+ virtual bool SendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) {
+ return false;
+ }
+
+ virtual bool ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) {
+ return false;
+ }
+
+ void SetExpectedRtcpPacket(const uint8* rtcp_buffer, int length) {
+ expected_packet_length_ = length;
+ memcpy(expected_packet_, rtcp_buffer, length);
+ }
+
+ int packet_count() { return packet_count_; }
+
+ private:
+ uint8 expected_packet_[kIpPacketSize];
+ size_t expected_packet_length_;
+ int packet_count_;
+};
+
+class RtcpSenderTest : public ::testing::Test {
+ protected:
+ RtcpSenderTest()
+ : rtcp_sender_(new RtcpSender(&test_transport_,
+ kSendingSsrc,
+ kCName)) {
+ }
+
+ TestRtcpTransport test_transport_;
+ scoped_ptr<RtcpSender> rtcp_sender_;
+};
+
+TEST_F(RtcpSenderTest, RtcpSenderReport) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name.
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSendingSsrc, 0);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr,
+ &sender_info,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReport) {
+ // Empty receiver report + c_name.
+ TestRtcpPacketBuilder p1;
+ p1.AddRr(kSendingSsrc, 0);
+ p1.AddSdesCname(kSendingSsrc, kCName);
+ test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
+ NULL,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+
+ // Receiver report with report block + c_name.
+ TestRtcpPacketBuilder p2;
+ p2.AddRr(kSendingSsrc, 1);
+ p2.AddRb(kMediaSsrc);
+ p2.AddSdesCname(kSendingSsrc, kCName);
+ test_transport_.SetExpectedRtcpPacket(p2.Packet(), p2.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number =
+ kExtendedMax;
+ report_block.jitter = kJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
+ NULL,
+ &report_block,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ EXPECT_EQ(2, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrr) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name + dlrr.
+ TestRtcpPacketBuilder p1;
+ p1.AddSr(kSendingSsrc, 0);
+ p1.AddSdesCname(kSendingSsrc, kCName);
+ p1.AddXrHeader(kSendingSsrc);
+ p1.AddXrDlrrBlock(kSendingSsrc);
+ test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
+
+ RtcpDlrrReportBlock dlrr_rb;
+ dlrr_rb.last_rr = kLastRr;
+ dlrr_rb.delay_since_last_rr = kDelayLastRr;
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
+ &sender_info,
+ NULL,
+ 0,
+ &dlrr_rb,
+ NULL,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
+ // Receiver report with report block + c_name.
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddXrHeader(kSendingSsrc);
+ p.AddXrRrtrBlock();
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number =
+ kExtendedMax;
+ report_block.jitter = kJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ RtcpReceiverReferenceTimeReport rrtr;
+ rrtr.ntp_seconds = kNtpHigh;
+ rrtr.ntp_fraction = kNtpLow;
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
+ NULL,
+ &report_block,
+ 0,
+ NULL,
+ &rrtr,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
+ // Receiver report with report block + c_name.
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddCast(kSendingSsrc, kMediaSsrc);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ RtcpCastMessage cast_message(kMediaSsrc);
+ cast_message.ack_frame_id_ = kAckFrameId;
+ std::set<uint16_t> missing_packets;
+ cast_message.missing_frames_and_packets_[
+ kLostFrameId] = missing_packets;
+
+ missing_packets.insert(kLostPacketId1);
+ missing_packets.insert(kLostPacketId2);
+ missing_packets.insert(kLostPacketId3);
+ cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
+ missing_packets;
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
+ NULL,
+ &report_block,
+ 0,
+ NULL,
+ NULL,
+ &cast_message);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithIntraFrameRequest) {
+ // Receiver report with report block + c_name.
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddPli(kSendingSsrc, kMediaSsrc);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number =
+ kExtendedMax;
+ report_block.jitter = kJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpPli,
+ NULL,
+ &report_block,
+ kMediaSsrc,
+ NULL,
+ NULL,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_unittest.cc b/chromium/media/cast/rtcp/rtcp_unittest.cc
new file mode 100644
index 00000000000..dfcc6ea910c
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_unittest.cc
@@ -0,0 +1,410 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
+#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+static const uint32 kSenderSsrc = 0x10203;
+static const uint32 kReceiverSsrc = 0x40506;
+static const uint32 kUnknownSsrc = 0xDEAD;
+static const std::string kCName("test@10.1.1.1");
+static const uint32 kRtcpIntervalMs = 500;
+static const int64 kStartMillisecond = 123456789;
+static const int64 kAddedDelay = 123;
+static const int64 kAddedShortDelay= 100;
+
+class LocalRtcpTransport : public PacedPacketSender {
+ public:
+ explicit LocalRtcpTransport(base::SimpleTestTickClock* testing_clock)
+ : short_delay_(false),
+ testing_clock_(testing_clock) {}
+
+ void SetRtcpReceiver(Rtcp* rtcp) { rtcp_ = rtcp; }
+
+ void SetShortDelay() { short_delay_ = true; }
+
+ virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
+ if (short_delay_) {
+ testing_clock_->Advance(
+ base::TimeDelta::FromMilliseconds(kAddedShortDelay));
+ } else {
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(kAddedDelay));
+ }
+ rtcp_->IncomingRtcpPacket(&(packet[0]), packet.size());
+ return true;
+ }
+
+ virtual bool SendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) OVERRIDE {
+ return false;
+ }
+
+ virtual bool ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) OVERRIDE {
+ return false;
+ }
+
+ private:
+ bool short_delay_;
+ Rtcp* rtcp_;
+ base::SimpleTestTickClock* testing_clock_;
+};
+
+class RtcpPeer : public Rtcp {
+ public:
+ RtcpPeer(RtcpSenderFeedback* sender_feedback,
+ PacedPacketSender* const paced_packet_sender,
+ RtpSenderStatistics* rtp_sender_statistics,
+ RtpReceiverStatistics* rtp_receiver_statistics,
+ RtcpMode rtcp_mode,
+ const base::TimeDelta& rtcp_interval,
+ bool sending_media,
+ uint32 local_ssrc,
+ const std::string& c_name)
+ : Rtcp(sender_feedback,
+ paced_packet_sender,
+ rtp_sender_statistics,
+ rtp_receiver_statistics,
+ rtcp_mode,
+ rtcp_interval,
+ sending_media,
+ local_ssrc,
+ c_name) {
+ }
+
+ using Rtcp::CheckForWrapAround;
+ using Rtcp::OnReceivedLipSyncInfo;
+};
+
+class RtcpTest : public ::testing::Test {
+ protected:
+ RtcpTest()
+ : transport_(&testing_clock_) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ ~RtcpTest() {}
+
+ void SetUp() {
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ LocalRtcpTransport transport_;
+ MockRtcpSenderFeedback mock_sender_feedback_;
+};
+
+TEST_F(RtcpTest, TimeToSend) {
+ base::TimeTicks start_time =
+ base::TimeTicks::FromInternalValue(kStartMillisecond * 1000);
+ Rtcp rtcp(&mock_sender_feedback_,
+ &transport_,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ true, // Media sender.
+ kSenderSsrc,
+ kCName);
+ rtcp.set_clock(&testing_clock_);
+ transport_.SetRtcpReceiver(&rtcp);
+ EXPECT_LE(start_time, rtcp.TimeToSendNextRtcpReport());
+ EXPECT_GE(start_time + base::TimeDelta::FromMilliseconds(
+ kRtcpIntervalMs * 3 / 2),
+ rtcp.TimeToSendNextRtcpReport());
+ base::TimeDelta delta = rtcp.TimeToSendNextRtcpReport() - start_time;
+ testing_clock_.Advance(delta);
+ EXPECT_EQ(testing_clock_.NowTicks(), rtcp.TimeToSendNextRtcpReport());
+}
+
+TEST_F(RtcpTest, BasicSenderReport) {
+ Rtcp rtcp(&mock_sender_feedback_,
+ &transport_,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ true, // Media sender.
+ kSenderSsrc,
+ kCName);
+ transport_.SetRtcpReceiver(&rtcp);
+ rtcp.SendRtcpReport(kUnknownSsrc);
+}
+
+TEST_F(RtcpTest, BasicReceiverReport) {
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
+ Rtcp rtcp(&mock_sender_feedback_,
+ &transport_,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false, // Media receiver.
+ kSenderSsrc,
+ kCName);
+ transport_.SetRtcpReceiver(&rtcp);
+ rtcp.SetRemoteSSRC(kSenderSsrc);
+ rtcp.SendRtcpReport(kSenderSsrc);
+}
+
+TEST_F(RtcpTest, BasicPli) {
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
+
+ // Media receiver.
+ Rtcp rtcp(&mock_sender_feedback_,
+ &transport_,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kSenderSsrc,
+ kCName);
+ rtcp.set_clock(&testing_clock_);
+ transport_.SetRtcpReceiver(&rtcp);
+ rtcp.SetRemoteSSRC(kSenderSsrc);
+ rtcp.SendRtcpPli(kSenderSsrc);
+}
+
+TEST_F(RtcpTest, BasicCast) {
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(1);
+
+ // Media receiver.
+ Rtcp rtcp(&mock_sender_feedback_,
+ &transport_,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kSenderSsrc,
+ kCName);
+ rtcp.set_clock(&testing_clock_);
+ transport_.SetRtcpReceiver(&rtcp);
+ rtcp.SetRemoteSSRC(kSenderSsrc);
+ RtcpCastMessage cast_message(kSenderSsrc);
+ cast_message.ack_frame_id_ = kAckFrameId;
+ std::set<uint16_t> missing_packets;
+ cast_message.missing_frames_and_packets_[
+ kLostFrameId] = missing_packets;
+
+ missing_packets.insert(kLostPacketId1);
+ missing_packets.insert(kLostPacketId2);
+ missing_packets.insert(kLostPacketId3);
+ cast_message.missing_frames_and_packets_[
+ kFrameIdWithLostPackets] = missing_packets;
+ rtcp.SendRtcpCast(cast_message);
+}
+
+TEST_F(RtcpTest, Rtt) {
+ // Media receiver.
+ LocalRtcpTransport receiver_transport(&testing_clock_);
+ Rtcp rtcp_receiver(&mock_sender_feedback_,
+ &receiver_transport,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kReceiverSsrc,
+ kCName);
+ rtcp_receiver.set_clock(&testing_clock_);
+
+ // Media sender.
+ LocalRtcpTransport sender_transport(&testing_clock_);
+ Rtcp rtcp_sender(&mock_sender_feedback_,
+ &sender_transport,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ true,
+ kSenderSsrc,
+ kCName);
+
+ rtcp_sender.set_clock(&testing_clock_);
+ receiver_transport.SetRtcpReceiver(&rtcp_sender);
+ sender_transport.SetRtcpReceiver(&rtcp_receiver);
+
+ rtcp_sender.SetRemoteSSRC(kReceiverSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(2);
+
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ rtcp_sender.SendRtcpReport(kSenderSsrc);
+ rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ rtcp_sender.SendRtcpReport(kSenderSsrc);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ receiver_transport.SetShortDelay();
+ sender_transport.SetShortDelay();
+ rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR((kAddedShortDelay + 3 * kAddedDelay) / 2,
+ avg_rtt.InMilliseconds(),
+ 1);
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ rtcp_sender.SendRtcpReport(kSenderSsrc);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR((2 * kAddedShortDelay + 2 * kAddedDelay) / 2,
+ avg_rtt.InMilliseconds(),
+ 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+}
+
+TEST_F(RtcpTest, NtpAndTime) {
+ RtcpPeer rtcp_peer(&mock_sender_feedback_,
+ NULL,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kReceiverSsrc,
+ kCName);
+ rtcp_peer.set_clock(&testing_clock_);
+ uint32 ntp_seconds = 0;
+ uint32 ntp_fractions = 0;
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ EXPECT_EQ(12345678u, ntp_seconds);
+ EXPECT_EQ(input_time,
+ ConvertNtpToTime(ntp_seconds, ntp_fractions));
+}
+
+TEST_F(RtcpTest, WrapAround) {
+ RtcpPeer rtcp_peer(&mock_sender_feedback_,
+ NULL,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kReceiverSsrc,
+ kCName);
+ rtcp_peer.set_clock(&testing_clock_);
+ uint32 new_timestamp = 0;
+ uint32 old_timestamp = 0;
+ EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
+ new_timestamp = 1234567890;
+ old_timestamp = 1234567000;
+ EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
+ new_timestamp = 1234567000;
+ old_timestamp = 1234567890;
+ EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
+ new_timestamp = 123;
+ old_timestamp = 4234567890;
+ EXPECT_EQ(1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
+ new_timestamp = 4234567890;
+ old_timestamp = 123;
+ EXPECT_EQ(-1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
+}
+
+TEST_F(RtcpTest, RtpTimestampInSenderTime) {
+ RtcpPeer rtcp_peer(&mock_sender_feedback_,
+ NULL,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ false,
+ kReceiverSsrc,
+ kCName);
+ rtcp_peer.set_clock(&testing_clock_);
+ int frequency = 32000;
+ uint32 rtp_timestamp = 64000;
+ base::TimeTicks rtp_timestamp_in_ticks;
+
+ // Test fail before we get a OnReceivedLipSyncInfo.
+ EXPECT_FALSE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+
+ uint32 ntp_seconds = 0;
+ uint32 ntp_fractions = 0;
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
+
+ // Test exact match.
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
+ EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+ EXPECT_EQ(input_time, rtp_timestamp_in_ticks);
+
+ // Test older rtp_timestamp.
+ rtp_timestamp = 32000;
+ EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+ EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(1000),
+ rtp_timestamp_in_ticks);
+
+ // Test older rtp_timestamp with wrap.
+ rtp_timestamp = 4294903296;
+ EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+ EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(4000),
+ rtp_timestamp_in_ticks);
+
+ // Test newer rtp_timestamp.
+ rtp_timestamp = 128000;
+ EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+ EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(2000),
+ rtp_timestamp_in_ticks);
+
+ // Test newer rtp_timestamp with wrap.
+ rtp_timestamp = 4294903296;
+ rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
+ rtp_timestamp = 64000;
+ EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
+ &rtp_timestamp_in_ticks));
+ EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(4000),
+ rtp_timestamp_in_ticks);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_utility.cc b/chromium/media/cast/rtcp/rtcp_utility.cc
new file mode 100644
index 00000000000..4f9d2ec7693
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_utility.cc
@@ -0,0 +1,862 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/rtcp_utility.h"
+
+#include "base/logging.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+RtcpParser::RtcpParser(const uint8* rtcpData, size_t rtcpDataLength)
+ : rtcp_data_begin_(rtcpData),
+ rtcp_data_end_(rtcpData + rtcpDataLength),
+ valid_packet_(false),
+ rtcp_data_(rtcpData),
+ rtcp_block_end_(NULL),
+ state_(kStateTopLevel),
+ number_of_blocks_(0),
+ field_type_(kRtcpNotValidCode) {
+ Validate();
+}
+
+RtcpParser::~RtcpParser() {}
+
+RtcpFieldTypes RtcpParser::FieldType() const {
+ return field_type_;
+}
+
+const RtcpField& RtcpParser::Field() const {
+ return field_;
+}
+
+RtcpFieldTypes RtcpParser::Begin() {
+ rtcp_data_ = rtcp_data_begin_;
+ return Iterate();
+}
+
+RtcpFieldTypes RtcpParser::Iterate() {
+ // Reset packet type
+ field_type_ = kRtcpNotValidCode;
+
+ if (!IsValid()) return kRtcpNotValidCode;
+
+ switch (state_) {
+ case kStateTopLevel:
+ IterateTopLevel();
+ break;
+ case kStateReportBlock:
+ IterateReportBlockItem();
+ break;
+ case kStateSdes:
+ IterateSdesItem();
+ break;
+ case kStateBye:
+ IterateByeItem();
+ break;
+ case kStateExtendedReportBlock:
+ IterateExtendedReportItem();
+ break;
+ case kStateExtendedReportDelaySinceLastReceiverReport:
+ IterateExtendedReportDelaySinceLastReceiverReportItem();
+ break;
+ case kStateGenericRtpFeedbackNack:
+ IterateNackItem();
+ break;
+ case kStatePayloadSpecificRpsi:
+ IterateRpsiItem();
+ break;
+ case kStatePayloadSpecificFir:
+ IterateFirItem();
+ break;
+ case kStatePayloadSpecificApplication:
+ IteratePayloadSpecificAppItem();
+ break;
+ case kStatePayloadSpecificRemb:
+ IteratePayloadSpecificRembItem();
+ break;
+ case kStatePayloadSpecificCast:
+ IteratePayloadSpecificCastItem();
+ break;
+ case kStatePayloadSpecificCastNack:
+ IteratePayloadSpecificCastNackItem();
+ break;
+ }
+ return field_type_;
+}
+
+void RtcpParser::IterateTopLevel() {
+ for (;;) {
+ RtcpCommonHeader header;
+
+ bool success = RtcpParseCommonHeader(rtcp_data_, rtcp_data_end_, &header);
+ if (!success) return;
+
+ rtcp_block_end_ = rtcp_data_ + header.length_in_octets;
+
+ if (rtcp_block_end_ > rtcp_data_end_) return; // Bad block!
+
+ switch (header.PT) {
+ case kPacketTypeSenderReport:
+ // number of Report blocks
+ number_of_blocks_ = header.IC;
+ ParseSR();
+ return;
+ case kPacketTypeReceiverReport:
+ // number of Report blocks
+ number_of_blocks_ = header.IC;
+ ParseRR();
+ return;
+ case kPacketTypeSdes:
+ // number of Sdes blocks
+ number_of_blocks_ = header.IC;
+ if (!ParseSdes()) {
+ break; // Nothing supported found, continue to next block!
+ }
+ return;
+ case kPacketTypeBye:
+ number_of_blocks_ = header.IC;
+ if (!ParseBye()) {
+ // Nothing supported found, continue to next block!
+ break;
+ }
+ return;
+ case kPacketTypeGenericRtpFeedback: // Fall through!
+ case kPacketTypePayloadSpecific:
+ if (!ParseFeedBackCommon(header)) {
+ // Nothing supported found, continue to next block!
+ break;
+ }
+ return;
+ case kPacketTypeXr:
+ if (!ParseExtendedReport()) {
+ break; // Nothing supported found, continue to next block!
+ }
+ return;
+ default:
+ // Not supported! Skip!
+ EndCurrentBlock();
+ break;
+ }
+ }
+}
+
+void RtcpParser::IterateReportBlockItem() {
+ bool success = ParseReportBlockItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateSdesItem() {
+ bool success = ParseSdesItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateByeItem() {
+ bool success = ParseByeItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateExtendedReportItem() {
+ bool success = ParseExtendedReportItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateExtendedReportDelaySinceLastReceiverReportItem() {
+ bool success = ParseExtendedReportDelaySinceLastReceiverReport();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateNackItem() {
+ bool success = ParseNackItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateRpsiItem() {
+ bool success = ParseRpsiItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateFirItem() {
+ bool success = ParseFirItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IteratePayloadSpecificAppItem() {
+ bool success = ParsePayloadSpecificAppItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IteratePayloadSpecificRembItem() {
+ bool success = ParsePayloadSpecificRembItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IteratePayloadSpecificCastItem() {
+ bool success = ParsePayloadSpecificCastItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IteratePayloadSpecificCastNackItem() {
+ bool success = ParsePayloadSpecificCastNackItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::Validate() {
+ if (rtcp_data_ == NULL) return; // NOT VALID
+
+ RtcpCommonHeader header;
+ bool success = RtcpParseCommonHeader(rtcp_data_begin_, rtcp_data_end_,
+ &header);
+
+ if (!success) return; // NOT VALID!
+
+ valid_packet_ = true;
+}
+
+bool RtcpParser::IsValid() const {
+ return valid_packet_;
+}
+
+void RtcpParser::EndCurrentBlock() {
+ rtcp_data_ = rtcp_block_end_;
+}
+
+bool RtcpParser::RtcpParseCommonHeader(const uint8* data_begin,
+ const uint8* data_end,
+ RtcpCommonHeader* parsed_header) const {
+ if (!data_begin || !data_end) return false;
+
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |V=2|P| IC | PT | length |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ //
+ // Common header for all Rtcp packets, 4 octets.
+
+ if ((data_end - data_begin) < 4) return false;
+
+ parsed_header->V = data_begin[0] >> 6;
+ parsed_header->P = ((data_begin[0] & 0x20) == 0) ? false : true;
+ parsed_header->IC = data_begin[0] & 0x1f;
+ parsed_header->PT = data_begin[1];
+
+ parsed_header->length_in_octets =
+ ((data_begin[2] << 8) + data_begin[3] + 1) * 4;
+
+ if (parsed_header->length_in_octets == 0) return false;
+
+ // Check if RTP version field == 2.
+ if (parsed_header->V != 2) return false;
+
+ return true;
+}
+
+bool RtcpParser::ParseRR() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 8) return false;
+
+ field_type_ = kRtcpRrCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header
+ big_endian_reader.ReadU32(&field_.receiver_report.sender_ssrc);
+ field_.receiver_report.number_of_report_blocks = number_of_blocks_;
+ rtcp_data_ += 8;
+
+ // State transition
+ state_ = kStateReportBlock;
+ return true;
+}
+
+bool RtcpParser::ParseSR() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 28) {
+ EndCurrentBlock();
+ return false;
+ }
+ field_type_ = kRtcpSrCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header
+ big_endian_reader.ReadU32(&field_.sender_report.sender_ssrc);
+ big_endian_reader.ReadU32(&field_.sender_report.ntp_most_significant);
+ big_endian_reader.ReadU32(&field_.sender_report.ntp_least_significant);
+ big_endian_reader.ReadU32(&field_.sender_report.rtp_timestamp);
+ big_endian_reader.ReadU32(&field_.sender_report.sender_packet_count);
+ big_endian_reader.ReadU32(&field_.sender_report.sender_octet_count);
+ field_.sender_report.number_of_report_blocks = number_of_blocks_;
+ rtcp_data_ += 28;
+
+ if (number_of_blocks_ != 0) {
+ // State transition.
+ state_ = kStateReportBlock;
+ } else {
+ // Don't go to state report block item if 0 report blocks.
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ }
+ return true;
+}
+
+bool RtcpParser::ParseReportBlockItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 24 || number_of_blocks_ <= 0) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&field_.report_block_item.ssrc);
+ big_endian_reader.ReadU8(&field_.report_block_item.fraction_lost);
+
+ uint8 temp_number_of_packets_lost;
+ big_endian_reader.ReadU8(&temp_number_of_packets_lost);
+ field_.report_block_item.cumulative_number_of_packets_lost =
+ temp_number_of_packets_lost << 16;
+ big_endian_reader.ReadU8(&temp_number_of_packets_lost);
+ field_.report_block_item.cumulative_number_of_packets_lost +=
+ temp_number_of_packets_lost << 8;
+ big_endian_reader.ReadU8(&temp_number_of_packets_lost);
+ field_.report_block_item.cumulative_number_of_packets_lost +=
+ temp_number_of_packets_lost;
+
+ big_endian_reader.ReadU32(
+ &field_.report_block_item.extended_highest_sequence_number);
+ big_endian_reader.ReadU32(&field_.report_block_item.jitter);
+ big_endian_reader.ReadU32(&field_.report_block_item.last_sender_report);
+ big_endian_reader.ReadU32(&field_.report_block_item.delay_last_sender_report);
+ rtcp_data_ += 24;
+
+ number_of_blocks_--;
+ field_type_ = kRtcpReportBlockItemCode;
+ return true;
+}
+
+bool RtcpParser::ParseSdes() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 8) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ rtcp_data_ += 4; // Skip header
+
+ state_ = kStateSdes;
+ field_type_ = kRtcpSdesCode;
+ return true;
+}
+
+bool RtcpParser::ParseSdesItem() {
+ if (number_of_blocks_ <= 0) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ number_of_blocks_--;
+
+ // Find c_name item in a Sdes chunk.
+ while (rtcp_data_ < rtcp_block_end_) {
+ ptrdiff_t data_length = rtcp_block_end_ - rtcp_data_;
+ if (data_length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ uint32 ssrc;
+ net::BigEndianReader big_endian_reader(rtcp_data_, data_length);
+ big_endian_reader.ReadU32(&ssrc);
+ rtcp_data_ += 4;
+
+ bool found_c_name = ParseSdesTypes();
+ if (found_c_name) {
+ field_.c_name.sender_ssrc = ssrc;
+ return true;
+ }
+ }
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+}
+
+bool RtcpParser::ParseSdesTypes() {
+ // Only the c_name item is mandatory. RFC 3550 page 46.
+ bool found_c_name = false;
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+
+ while (big_endian_reader.remaining() > 0) {
+ uint8 tag;
+ big_endian_reader.ReadU8(&tag);
+
+ if (tag == 0) {
+ // End tag! 4 octet aligned.
+ rtcp_data_ = rtcp_block_end_;
+ return found_c_name;
+ }
+
+ if (big_endian_reader.remaining() > 0) {
+ uint8 len;
+ big_endian_reader.ReadU8(&len);
+
+ if (tag == 1) { // c_name.
+ // Sanity check.
+ if (big_endian_reader.remaining() < len) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ int i = 0;
+ for (; i < len; ++i) {
+ uint8 c;
+ big_endian_reader.ReadU8(&c);
+ if ((c < ' ') || (c > '{') || (c == '%') || (c == '\\')) {
+ // Illegal char.
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ field_.c_name.name[i] = c;
+ }
+ // Make sure we are null terminated.
+ field_.c_name.name[i] = 0;
+ field_type_ = kRtcpSdesChunkCode;
+ found_c_name = true;
+ } else {
+ big_endian_reader.Skip(len);
+ }
+ }
+ }
+ // No end tag found!
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+}
+
+bool RtcpParser::ParseBye() {
+ rtcp_data_ += 4; // Skip header.
+ state_ = kStateBye;
+ return ParseByeItem();
+}
+
+bool RtcpParser::ParseByeItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4 || number_of_blocks_ == 0) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ field_type_ = kRtcpByeCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&field_.bye.sender_ssrc);
+ rtcp_data_ += 4;
+
+ // We can have several CSRCs attached.
+ if (length >= 4 * number_of_blocks_) {
+ rtcp_data_ += (number_of_blocks_ - 1) * 4;
+ }
+ number_of_blocks_ = 0;
+ return true;
+}
+
+bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
+ DCHECK((header.PT == kPacketTypeGenericRtpFeedback) ||
+ (header.PT == kPacketTypePayloadSpecific)) << "Invalid state";
+
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 12) { // 4 * 3, RFC4585 section 6.1
+ EndCurrentBlock();
+ return false;
+ }
+
+ uint32 sender_ssrc;
+ uint32 media_ssrc;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header.
+ big_endian_reader.ReadU32(&sender_ssrc);
+ big_endian_reader.ReadU32(&media_ssrc);
+
+ rtcp_data_ += 12;
+
+ if (header.PT == kPacketTypeGenericRtpFeedback) {
+ // Transport layer feedback
+ switch (header.IC) {
+ case 1:
+ // Nack
+ field_type_ = kRtcpGenericRtpFeedbackNackCode;
+ field_.nack.sender_ssrc = sender_ssrc;
+ field_.nack.media_ssrc = media_ssrc;
+ state_ = kStateGenericRtpFeedbackNack;
+ return true;
+ case 2:
+        // ACK used to be this code point; it was removed as it conflicts with
+ // http://tools.ietf.org/html/draft-levin-avt-rtcp-burst-00
+ break;
+ case 3:
+ // Tmmbr
+ break;
+ case 4:
+ // Tmmbn
+ break;
+ case 5:
+ // RFC 6051 RTCP-sender_report-REQ Rapid Synchronisation of RTP Flows
+ // Trigger a new Rtcp sender_report
+ field_type_ = kRtcpGenericRtpFeedbackSrReqCode;
+
+ // Note: No state transition, sender report REQ is empty!
+ return true;
+ default:
+ break;
+ }
+ EndCurrentBlock();
+ return false;
+
+ } else if (header.PT == kPacketTypePayloadSpecific) {
+ // Payload specific feedback
+ switch (header.IC) {
+ case 1:
+ // PLI
+ field_type_ = kRtcpPayloadSpecificPliCode;
+ field_.pli.sender_ssrc = sender_ssrc;
+ field_.pli.media_ssrc = media_ssrc;
+
+ // Note: No state transition, PLI FCI is empty!
+ return true;
+ case 2:
+ // Sli
+ break;
+ case 3:
+ field_type_ = kRtcpPayloadSpecificRpsiCode;
+ field_.rpsi.sender_ssrc = sender_ssrc;
+ field_.rpsi.media_ssrc = media_ssrc;
+ state_ = kStatePayloadSpecificRpsi;
+ return true;
+ case 4:
+ // fir
+ break;
+ case 15:
+ field_type_ = kRtcpPayloadSpecificAppCode;
+ field_.application_specific.sender_ssrc = sender_ssrc;
+ field_.application_specific.media_ssrc = media_ssrc;
+ state_ = kStatePayloadSpecificApplication;
+ return true;
+ default:
+ break;
+ }
+
+ EndCurrentBlock();
+ return false;
+ } else {
+ DCHECK(false) << "Invalid state";
+ EndCurrentBlock();
+ return false;
+ }
+}
+
+bool RtcpParser::ParseRpsiItem() {
+ // RFC 4585 6.3.3. Reference Picture Selection Indication (rpsi)
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | PB |0| Payload Type| Native rpsi bit string |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | defined per codec ... | Padding (0) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ if (length > 2 + kRtcpRpsiDataSize) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ field_type_ = kRtcpPayloadSpecificRpsiCode;
+
+ uint8 padding_bits;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU8(&padding_bits);
+ big_endian_reader.ReadU8(&field_.rpsi.payload_type);
+ big_endian_reader.ReadBytes(&field_.rpsi.native_bit_string, length - 2);
+ field_.rpsi.number_of_valid_bits =
+ static_cast<uint16>(length - 2) * 8 - padding_bits;
+
+ rtcp_data_ += length;
+ return true;
+}
+
+bool RtcpParser::ParseNackItem() {
+ // RFC 4585 6.2.1. Generic Nack
+
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ field_type_ = kRtcpGenericRtpFeedbackNackItemCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU16(&field_.nack_item.packet_id);
+ big_endian_reader.ReadU16(&field_.nack_item.bitmask);
+ rtcp_data_ += 4;
+ return true;
+}
+
+bool RtcpParser::ParsePayloadSpecificAppItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ uint32 name;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&name);
+ rtcp_data_ += 4;
+
+ if (name == kRemb) {
+ field_type_ = kRtcpPayloadSpecificRembCode;
+ state_ = kStatePayloadSpecificRemb;
+ return true;
+ } else if (name == kCast) {
+ field_type_ = kRtcpPayloadSpecificCastCode;
+ state_ = kStatePayloadSpecificCast;
+ return true;
+ }
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+}
+
+bool RtcpParser::ParsePayloadSpecificRembItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU8(&field_.remb_item.number_of_ssrcs);
+
+ uint8 byte_1;
+ uint8 byte_2;
+ uint8 byte_3;
+ big_endian_reader.ReadU8(&byte_1);
+ big_endian_reader.ReadU8(&byte_2);
+ big_endian_reader.ReadU8(&byte_3);
+ rtcp_data_ += 4;
+
+ uint8 br_exp = (byte_1 >> 2) & 0x3F;
+ uint32 br_mantissa = ((byte_1 & 0x03) << 16) + (byte_2 << 8) + byte_3;
+ field_.remb_item.bitrate = (br_mantissa << br_exp);
+
+ ptrdiff_t length_ssrcs = rtcp_block_end_ - rtcp_data_;
+ if (length_ssrcs < 4 * field_.remb_item.number_of_ssrcs) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ field_type_ = kRtcpPayloadSpecificRembItemCode;
+
+ for (int i = 0; i < field_.remb_item.number_of_ssrcs; i++) {
+ big_endian_reader.ReadU32(&field_.remb_item.ssrcs[i]);
+ }
+ return true;
+}
+
+bool RtcpParser::ParsePayloadSpecificCastItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ field_type_ = kRtcpPayloadSpecificCastCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU8(&field_.cast_item.last_frame_id);
+ big_endian_reader.ReadU8(&field_.cast_item.number_of_lost_fields);
+
+ rtcp_data_ += 4;
+
+ if (field_.cast_item.number_of_lost_fields != 0) {
+ // State transition
+ state_ = kStatePayloadSpecificCastNack;
+ } else {
+ // Don't go to state cast nack item if got 0 fields.
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ }
+ return true;
+}
+
+bool RtcpParser::ParsePayloadSpecificCastNackItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ field_type_ = kRtcpPayloadSpecificCastNackItemCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU8(&field_.cast_nack_item.frame_id);
+ big_endian_reader.ReadU16(&field_.cast_nack_item.packet_id);
+ big_endian_reader.ReadU8(&field_.cast_nack_item.bitmask);
+
+ rtcp_data_ += 4;
+ return true;
+}
+
+bool RtcpParser::ParseFirItem() {
+ // RFC 5104 4.3.1. Full Intra Request (fir)
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 8) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ field_type_ = kRtcpPayloadSpecificFirItemCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&field_.fir_item.ssrc);
+ big_endian_reader.ReadU8(&field_.fir_item.command_sequence_number);
+
+ rtcp_data_ += 8;
+ return true;
+}
+
+bool RtcpParser::ParseExtendedReport() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 8) return false;
+
+ field_type_ = kRtcpXrCode;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header.
+ big_endian_reader.ReadU32(&field_.extended_report.sender_ssrc);
+
+ rtcp_data_ += 8;
+
+ state_ = kStateExtendedReportBlock;
+ return true;
+}
+
+bool RtcpParser::ParseExtendedReportItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ uint8 block_type;
+ uint16 block_length;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU8(&block_type);
+ big_endian_reader.Skip(1); // Ignore reserved.
+ big_endian_reader.ReadU16(&block_length);
+
+ rtcp_data_ += 4;
+
+ switch (block_type) {
+ case 4:
+ if (block_length != 2) {
+ // Invalid block length.
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ return ParseExtendedReportReceiverReferenceTimeReport();
+ case 5:
+ if (block_length % 3 != 0) {
+ // Invalid block length.
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ if (block_length >= 3) {
+ number_of_blocks_ = block_length / 3;
+ state_ = kStateExtendedReportDelaySinceLastReceiverReport;
+ return ParseExtendedReportDelaySinceLastReceiverReport();
+ }
+ return true;
+ default:
+ if (length < block_length * 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ field_type_ = kRtcpXrUnknownItemCode;
+ rtcp_data_ += block_length * 4;
+ return true;
+ }
+}
+
+bool RtcpParser::ParseExtendedReportReceiverReferenceTimeReport() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 8) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&field_.rrtr.ntp_most_significant);
+ big_endian_reader.ReadU32(&field_.rrtr.ntp_least_significant);
+
+ rtcp_data_ += 8;
+
+ field_type_ = kRtcpXrRrtrCode;
+ return true;
+}
+
+bool RtcpParser::ParseExtendedReportDelaySinceLastReceiverReport() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 12) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ if (number_of_blocks_ == 0) {
+ // Continue parsing the extended report block.
+ state_ = kStateExtendedReportBlock;
+ return false;
+ }
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&field_.dlrr.receivers_ssrc);
+ big_endian_reader.ReadU32(&field_.dlrr.last_receiver_report);
+ big_endian_reader.ReadU32(&field_.dlrr.delay_last_receiver_report);
+
+ rtcp_data_ += 12;
+
+ number_of_blocks_--;
+ field_type_ = kRtcpXrDlrrCode;
+ return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_utility.h b/chromium/media/cast/rtcp/rtcp_utility.h
new file mode 100644
index 00000000000..2df13e7aed9
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_utility.h
@@ -0,0 +1,319 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RTCP_UTILITY_H_
+#define MEDIA_CAST_RTCP_RTCP_UTILITY_H_
+
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+
+namespace media {
+namespace cast {
+
+static const int kRtcpRpsiDataSize = 30;
+static const int kRtcpCnameSize = 256; // RFC 3550 page 44, including end null.
+static const int kRtcpMaxNumberOfRembFeedbackSsrcs = 255;
+
+static const uint32 kRemb = ('R' << 24) + ('E' << 16) + ('M' << 8) + 'B';
+static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
+
+struct RtcpFieldReceiverReport {
+ // RFC 3550.
+ uint32 sender_ssrc;
+ uint8 number_of_report_blocks;
+};
+
+struct RtcpFieldSenderReport {
+ // RFC 3550.
+ uint32 sender_ssrc;
+ uint8 number_of_report_blocks;
+ uint32 ntp_most_significant;
+ uint32 ntp_least_significant;
+ uint32 rtp_timestamp;
+ uint32 sender_packet_count;
+ uint32 sender_octet_count;
+};
+
+struct RtcpFieldReportBlockItem {
+ // RFC 3550.
+ uint32 ssrc;
+ uint8 fraction_lost;
+ uint32 cumulative_number_of_packets_lost;
+ uint32 extended_highest_sequence_number;
+ uint32 jitter;
+ uint32 last_sender_report;
+ uint32 delay_last_sender_report;
+};
+
+struct RtcpFieldSdesCName {
+ // RFC 3550
+ uint32 sender_ssrc;
+ char name[kRtcpCnameSize];
+};
+
+struct RtcpFieldBye {
+ // RFC 3550.
+ uint32 sender_ssrc;
+};
+
+struct RtcpFieldGenericRtpFeedbackNack {
+ // RFC 4585.
+ uint32 sender_ssrc;
+ uint32 media_ssrc;
+};
+
+struct RtcpFieldGenericRtpFeedbackNackItem {
+ // RFC 4585.
+ uint16 packet_id;
+ uint16 bitmask;
+};
+
+struct RtcpFieldPayloadSpecificFir {
+ // RFC 5104.
+ uint32 sender_ssrc;
+ uint32 media_ssrc; // zero!
+};
+
+struct RtcpFieldPayloadSpecificFirItem {
+ // RFC 5104.
+ uint32 ssrc;
+ uint8 command_sequence_number;
+};
+
+struct RtcpFieldPayloadSpecificPli {
+ // RFC 4585.
+ uint32 sender_ssrc;
+ uint32 media_ssrc;
+};
+
+struct RtcpFieldPayloadSpecificRpsi {
+ // RFC 4585.
+ uint32 sender_ssrc;
+ uint32 media_ssrc;
+ uint8 payload_type;
+ uint16 number_of_valid_bits;
+ uint8 native_bit_string[kRtcpRpsiDataSize];
+};
+
+struct RtcpFieldXr {
+ // RFC 3611.
+ uint32 sender_ssrc;
+};
+
+struct RtcpFieldXrRrtr {
+ // RFC 3611.
+ uint32 ntp_most_significant;
+ uint32 ntp_least_significant;
+};
+
+struct RtcpFieldXrDlrr {
+ // RFC 3611.
+ uint32 receivers_ssrc;
+ uint32 last_receiver_report;
+ uint32 delay_last_receiver_report;
+};
+
+struct RtcpFieldPayloadSpecificApplication {
+ uint32 sender_ssrc;
+ uint32 media_ssrc;
+};
+
+struct RtcpFieldPayloadSpecificRembItem {
+ uint32 bitrate;
+ uint8 number_of_ssrcs;
+ uint32 ssrcs[kRtcpMaxNumberOfRembFeedbackSsrcs];
+};
+
+struct RtcpFieldPayloadSpecificCastItem {
+ uint8 last_frame_id;
+ uint8 number_of_lost_fields;
+};
+
+struct RtcpFieldPayloadSpecificCastNackItem {
+ uint8 frame_id;
+ uint16 packet_id;
+ uint8 bitmask;
+};
+
+union RtcpField {
+ RtcpFieldReceiverReport receiver_report;
+ RtcpFieldSenderReport sender_report;
+ RtcpFieldReportBlockItem report_block_item;
+ RtcpFieldSdesCName c_name;
+ RtcpFieldBye bye;
+
+ RtcpFieldXr extended_report;
+ RtcpFieldXrRrtr rrtr;
+ RtcpFieldXrDlrr dlrr;
+
+ RtcpFieldGenericRtpFeedbackNack nack;
+ RtcpFieldGenericRtpFeedbackNackItem nack_item;
+
+ RtcpFieldPayloadSpecificPli pli;
+ RtcpFieldPayloadSpecificRpsi rpsi;
+ RtcpFieldPayloadSpecificFir fir;
+ RtcpFieldPayloadSpecificFirItem fir_item;
+ RtcpFieldPayloadSpecificApplication application_specific;
+ RtcpFieldPayloadSpecificRembItem remb_item;
+ RtcpFieldPayloadSpecificCastItem cast_item;
+ RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
+};
+
+enum RtcpFieldTypes {
+ kRtcpNotValidCode,
+
+ // RFC 3550.
+ kRtcpRrCode,
+ kRtcpSrCode,
+ kRtcpReportBlockItemCode,
+
+ kRtcpSdesCode,
+ kRtcpSdesChunkCode,
+ kRtcpByeCode,
+
+ // RFC 3611.
+ kRtcpXrCode,
+ kRtcpXrRrtrCode,
+ kRtcpXrDlrrCode,
+ kRtcpXrUnknownItemCode,
+
+ // RFC 4585.
+ kRtcpGenericRtpFeedbackNackCode,
+ kRtcpGenericRtpFeedbackNackItemCode,
+
+ kRtcpPayloadSpecificPliCode,
+ kRtcpPayloadSpecificRpsiCode,
+ kRtcpPayloadSpecificAppCode,
+
+ kRtcpPayloadSpecificRembCode,
+ kRtcpPayloadSpecificRembItemCode,
+ kRtcpPayloadSpecificCastCode,
+ kRtcpPayloadSpecificCastNackItemCode,
+
+ // RFC 5104.
+ kRtcpPayloadSpecificFirCode,
+ kRtcpPayloadSpecificFirItemCode,
+
+ // RFC 6051.
+ kRtcpGenericRtpFeedbackSrReqCode,
+};
+
+struct RtcpCommonHeader {
+ uint8 V; // Version.
+ bool P; // Padding.
+ uint8 IC; // Item count / subtype.
+ uint8 PT; // Packet Type.
+ uint16 length_in_octets;
+};
+
+enum RtcpPacketTypes {
+ kPacketTypeLow = 194, // SMPTE time-code mapping.
+ kPacketTypeInterArrivalJitterReport = 195,
+ kPacketTypeSenderReport = 200,
+ kPacketTypeReceiverReport = 201,
+  kPacketTypeSdes = 202,
+ kPacketTypeBye = 203,
+ kPacketTypeApplicationDefined = 204,
+ kPacketTypeGenericRtpFeedback = 205,
+ kPacketTypePayloadSpecific = 206,
+ kPacketTypeXr = 207,
+ kPacketTypeHigh = 210, // Port Mapping.
+};
+
+class RtcpParser {
+ public:
+ RtcpParser(const uint8* rtcp_data, size_t rtcp_length);
+ ~RtcpParser();
+
+ RtcpFieldTypes FieldType() const;
+ const RtcpField& Field() const;
+
+ bool IsValid() const;
+
+ RtcpFieldTypes Begin();
+ RtcpFieldTypes Iterate();
+
+ private:
+ enum ParseState {
+ kStateTopLevel, // Top level packet
+    kStateReportBlock,  // Report blocks within sender/receiver reports.
+ kStateSdes,
+ kStateBye,
+ kStateExtendedReportBlock,
+ kStateExtendedReportDelaySinceLastReceiverReport,
+ kStateGenericRtpFeedbackNack,
+ kStatePayloadSpecificRpsi,
+ kStatePayloadSpecificFir,
+ kStatePayloadSpecificApplication,
+ kStatePayloadSpecificRemb, // Application specific Remb.
+ kStatePayloadSpecificCast, // Application specific Cast.
+ kStatePayloadSpecificCastNack, // Application specific Nack for Cast.
+ };
+
+ bool RtcpParseCommonHeader(const uint8* begin,
+ const uint8* end,
+ RtcpCommonHeader* parsed_header) const;
+
+ void IterateTopLevel();
+ void IterateReportBlockItem();
+ void IterateSdesItem();
+ void IterateByeItem();
+ void IterateExtendedReportItem();
+ void IterateExtendedReportDelaySinceLastReceiverReportItem();
+ void IterateNackItem();
+ void IterateRpsiItem();
+ void IterateFirItem();
+ void IteratePayloadSpecificAppItem();
+ void IteratePayloadSpecificRembItem();
+ void IteratePayloadSpecificCastItem();
+ void IteratePayloadSpecificCastNackItem();
+
+ void Validate();
+ void EndCurrentBlock();
+
+ bool ParseRR();
+ bool ParseSR();
+ bool ParseReportBlockItem();
+
+ bool ParseSdes();
+ bool ParseSdesItem();
+ bool ParseSdesTypes();
+ bool ParseBye();
+ bool ParseByeItem();
+
+ bool ParseExtendedReport();
+ bool ParseExtendedReportItem();
+ bool ParseExtendedReportReceiverReferenceTimeReport();
+ bool ParseExtendedReportDelaySinceLastReceiverReport();
+
+ bool ParseFeedBackCommon(const RtcpCommonHeader& header);
+ bool ParseNackItem();
+ bool ParseRpsiItem();
+ bool ParseFirItem();
+ bool ParsePayloadSpecificAppItem();
+ bool ParsePayloadSpecificRembItem();
+ bool ParsePayloadSpecificCastItem();
+ bool ParsePayloadSpecificCastNackItem();
+
+ private:
+ const uint8* const rtcp_data_begin_;
+ const uint8* const rtcp_data_end_;
+
+ bool valid_packet_;
+ const uint8* rtcp_data_;
+ const uint8* rtcp_block_end_;
+
+ ParseState state_;
+ uint8 number_of_blocks_;
+ RtcpFieldTypes field_type_;
+ RtcpField field_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpParser);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_RTCP_UTILITY_H_
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
new file mode 100644
index 00000000000..d6468e53a4e
--- /dev/null
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
@@ -0,0 +1,230 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+TestRtcpPacketBuilder::TestRtcpPacketBuilder()
+ : ptr_of_length_(NULL),
+ big_endian_writer_(buffer_, kIpPacketSize) {
+}
+
+void TestRtcpPacketBuilder::AddSr(uint32 sender_ssrc,
+ int number_of_report_blocks) {
+ AddRtcpHeader(200, number_of_report_blocks);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(kNtpHigh); // NTP timestamp.
+ big_endian_writer_.WriteU32(kNtpLow);
+ big_endian_writer_.WriteU32(kRtpTimestamp);
+ big_endian_writer_.WriteU32(kSendPacketCount);
+ big_endian_writer_.WriteU32(kSendOctetCount);
+}
+
+void TestRtcpPacketBuilder::AddRr(uint32 sender_ssrc,
+ int number_of_report_blocks) {
+ AddRtcpHeader(201, number_of_report_blocks);
+ big_endian_writer_.WriteU32(sender_ssrc);
+}
+
+void TestRtcpPacketBuilder::AddRb(uint32 rtp_ssrc) {
+ big_endian_writer_.WriteU32(rtp_ssrc);
+ big_endian_writer_.WriteU32(kLoss);
+ big_endian_writer_.WriteU32(kExtendedMax);
+ big_endian_writer_.WriteU32(kJitter);
+ big_endian_writer_.WriteU32(kLastSr);
+ big_endian_writer_.WriteU32(kDelayLastSr);
+}
+
+void TestRtcpPacketBuilder::AddSdesCname(uint32 sender_ssrc,
+                                         const std::string& c_name) {
+  AddRtcpHeader(202, 1);
+  big_endian_writer_.WriteU32(sender_ssrc);
+  big_endian_writer_.WriteU8(1);  // c_name.
+  big_endian_writer_.WriteU8(c_name.size());  // c_name length in bytes.
+  for (size_t i = 0; i < c_name.size(); ++i) {
+    big_endian_writer_.WriteU8(c_name.c_str()[i]);
+  }
+  int padding = 0;  // Initialized: the switch below covers all cases, but the
+  switch (c_name.size() % 4) {  // compiler cannot prove it.
+    case 0:
+      padding = 2;
+      break;
+    case 1:
+      padding = 1;
+      break;
+    case 2:
+      padding = 4;
+      break;
+    case 3:
+      padding = 3;
+      break;
+  }
+  for (int j = 0; j < padding; ++j) {
+    big_endian_writer_.WriteU8(0);
+  }
+}
+
+void TestRtcpPacketBuilder::AddXrHeader(uint32 sender_ssrc) {
+ AddRtcpHeader(207, 0);
+ big_endian_writer_.WriteU32(sender_ssrc);
+}
+
+void TestRtcpPacketBuilder::AddXrUnknownBlock() {
+ big_endian_writer_.WriteU8(9); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU16(4); // Block length.
+ // First receiver same as sender of this report.
+ big_endian_writer_.WriteU32(0);
+ big_endian_writer_.WriteU32(0);
+ big_endian_writer_.WriteU32(0);
+ big_endian_writer_.WriteU32(0);
+}
+
+void TestRtcpPacketBuilder::AddXrDlrrBlock(uint32 sender_ssrc) {
+ big_endian_writer_.WriteU8(5); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU16(3); // Block length.
+
+ // First receiver same as sender of this report.
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(kLastRr);
+ big_endian_writer_.WriteU32(kDelayLastRr);
+}
+
+void TestRtcpPacketBuilder::AddXrExtendedDlrrBlock(uint32 sender_ssrc) {
+ big_endian_writer_.WriteU8(5); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU16(9); // Block length.
+ big_endian_writer_.WriteU32(0xaaaaaaaa);
+ big_endian_writer_.WriteU32(0xaaaaaaaa);
+ big_endian_writer_.WriteU32(0xaaaaaaaa);
+
+ // First receiver same as sender of this report.
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(kLastRr);
+ big_endian_writer_.WriteU32(kDelayLastRr);
+ big_endian_writer_.WriteU32(0xbbbbbbbb);
+ big_endian_writer_.WriteU32(0xbbbbbbbb);
+ big_endian_writer_.WriteU32(0xbbbbbbbb);
+}
+
+void TestRtcpPacketBuilder::AddXrRrtrBlock() {
+ big_endian_writer_.WriteU8(4); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU16(2); // Block length.
+ big_endian_writer_.WriteU32(kNtpHigh);
+ big_endian_writer_.WriteU32(kNtpLow);
+}
+
+void TestRtcpPacketBuilder::AddNack(uint32 sender_ssrc, uint32 media_ssrc) {
+ AddRtcpHeader(205, 1);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(media_ssrc);
+ big_endian_writer_.WriteU16(kMissingPacket);
+ big_endian_writer_.WriteU16(0);
+}
+
+void TestRtcpPacketBuilder::AddSendReportRequest(uint32 sender_ssrc,
+ uint32 media_ssrc) {
+ AddRtcpHeader(205, 5);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(media_ssrc);
+}
+
+void TestRtcpPacketBuilder::AddPli(uint32 sender_ssrc, uint32 media_ssrc) {
+ AddRtcpHeader(206, 1);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(media_ssrc);
+}
+
+void TestRtcpPacketBuilder::AddRpsi(uint32 sender_ssrc, uint32 media_ssrc) {
+ AddRtcpHeader(206, 3);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(media_ssrc);
+ big_endian_writer_.WriteU8(0); // Padding bits.
+ big_endian_writer_.WriteU8(kPayloadtype);
+ uint64 picture_id = kPictureId;
+
+ for (int i = 9; i > 0; i--) {
+ big_endian_writer_.WriteU8(
+ 0x80 | static_cast<uint8>(picture_id >> (i * 7)));
+ }
+ // Add last byte of picture ID.
+ big_endian_writer_.WriteU8(static_cast<uint8>(picture_id & 0x7f));
+}
+
+void TestRtcpPacketBuilder::AddRemb(uint32 sender_ssrc, uint32 media_ssrc) {
+ AddRtcpHeader(206, 15);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(0);
+ big_endian_writer_.WriteU8('R');
+ big_endian_writer_.WriteU8('E');
+ big_endian_writer_.WriteU8('M');
+ big_endian_writer_.WriteU8('B');
+ big_endian_writer_.WriteU8(1); // Number of SSRCs.
+ big_endian_writer_.WriteU8(1); // BR Exp.
+ // BR Mantissa.
+ big_endian_writer_.WriteU16(static_cast<uint16>(kRembBitrate / 2));
+ big_endian_writer_.WriteU32(media_ssrc);
+}
+
+void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
+ AddRtcpHeader(206, 15);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(media_ssrc);
+ big_endian_writer_.WriteU8('C');
+ big_endian_writer_.WriteU8('A');
+ big_endian_writer_.WriteU8('S');
+ big_endian_writer_.WriteU8('T');
+ big_endian_writer_.WriteU8(kAckFrameId);
+ big_endian_writer_.WriteU8(3); // Loss fields.
+ big_endian_writer_.WriteU16(0); // Reserved.
+ big_endian_writer_.WriteU8(kLostFrameId);
+ big_endian_writer_.WriteU16(kRtcpCastAllPacketsLost);
+ big_endian_writer_.WriteU8(0); // Lost packet id mask.
+ big_endian_writer_.WriteU8(kFrameIdWithLostPackets);
+ big_endian_writer_.WriteU16(kLostPacketId1);
+ big_endian_writer_.WriteU8(0x2); // Lost packet id mask.
+ big_endian_writer_.WriteU8(kFrameIdWithLostPackets);
+ big_endian_writer_.WriteU16(kLostPacketId3);
+ big_endian_writer_.WriteU8(0); // Lost packet id mask.
+}
+
+const uint8* TestRtcpPacketBuilder::Packet() {
+ PatchLengthField();
+ return buffer_;
+}
+
+void TestRtcpPacketBuilder::PatchLengthField() {
+ if (ptr_of_length_) {
+ // Back-patch the packet length. The client must have taken
+ // care of proper padding to 32-bit words.
+ int this_packet_length = (big_endian_writer_.ptr() - ptr_of_length_ - 2);
+ DCHECK_EQ(0, this_packet_length % 4)
+ << "Packets must be a multiple of 32 bits long";
+ *ptr_of_length_ = this_packet_length >> 10;
+ *(ptr_of_length_ + 1) = (this_packet_length >> 2) & 0xFF;
+ ptr_of_length_ = NULL;
+ }
+}
+
+// Set the 5-bit value in the 1st byte of the header
+// and the payload type. Set aside room for the length field,
+// and make provision for back-patching it.
+void TestRtcpPacketBuilder::AddRtcpHeader(int payload, int format_or_count) {
+ PatchLengthField();
+ big_endian_writer_.WriteU8(0x80 | (format_or_count & 0x1F));
+ big_endian_writer_.WriteU8(payload);
+ ptr_of_length_ = big_endian_writer_.ptr();
+
+ // Initialize length to "clearly illegal".
+ big_endian_writer_.WriteU16(0xDEAD);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.h b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
new file mode 100644
index 00000000000..be93f0adb3c
--- /dev/null
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A very simple packet builder class for building RTCP packets.
+// Used for testing only.
+#ifndef MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
+#define MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
+
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+// Sender report.
+static const int kNtpHigh = 0x01020304;
+static const int kNtpLow = 0x05060708;
+static const int kRtpTimestamp = 0x10203;
+static const int kSendPacketCount = 987;
+static const int kSendOctetCount = 87654;
+
+// Report block.
+static const int kLoss = 0x01000123;
+static const int kExtendedMax = 0x15678;
+static const int kJitter = 0x10203;
+static const int kLastSr = 0x34561234;
+static const int kDelayLastSr = 1000;
+
+// DLRR block.
+static const int kLastRr = 0x34561234;
+static const int kDelayLastRr = 1000;
+
+// REMB.
+static const int kRembBitrate = 524286;
+
+// RPSI.
+static const int kPayloadtype = 126;
+static const uint64 kPictureId = 0x1234567890;
+
+// NACK.
+static const int kMissingPacket = 34567;
+
+// CAST.
+static const int kAckFrameId = 17;
+static const int kLostFrameId = 18;
+static const int kFrameIdWithLostPackets = 19;
+static const int kLostPacketId1 = 3;
+static const int kLostPacketId2 = 5;
+static const int kLostPacketId3 = 12;
+
+class TestRtcpPacketBuilder {
+ public:
+ TestRtcpPacketBuilder();
+
+ void AddSr(uint32 sender_ssrc, int number_of_report_blocks);
+ void AddRr(uint32 sender_ssrc, int number_of_report_blocks);
+ void AddRb(uint32 rtp_ssrc);
+ void AddSdesCname(uint32 sender_ssrc, const std::string& c_name);
+
+ void AddXrHeader(uint32 sender_ssrc);
+ void AddXrDlrrBlock(uint32 sender_ssrc);
+ void AddXrExtendedDlrrBlock(uint32 sender_ssrc);
+ void AddXrRrtrBlock();
+ void AddXrUnknownBlock();
+
+ void AddNack(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddSendReportRequest(uint32 sender_ssrc, uint32 media_ssrc);
+
+ void AddPli(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddCast(uint32 sender_ssrc, uint32 media_ssrc);
+
+ const uint8* Packet();
+ int Length() { return kIpPacketSize - big_endian_writer_.remaining(); }
+
+ private:
+ void AddRtcpHeader(int payload, int format_or_count);
+ void PatchLengthField();
+
+ // Where the length field of the current packet is.
+ // Note: 0 is not a legal value, it is used for "uninitialized".
+ uint8 buffer_[kIpPacketSize];
+ char* ptr_of_length_;
+ net::BigEndianWriter big_endian_writer_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
+
+
diff --git a/chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h b/chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h
new file mode 100644
index 00000000000..d962ff895c5
--- /dev/null
+++ b/chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h
@@ -0,0 +1,23 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
+#define MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
+
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtpPayloadFeedback : public RtpPayloadFeedback {
+ public:
+ MOCK_METHOD1(CastFeedback,
+ void(const RtcpCastMessage& cast_feedback));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
diff --git a/chromium/media/cast/rtp_common/rtp_defines.h b/chromium/media/cast/rtp_common/rtp_defines.h
new file mode 100644
index 00000000000..dc64c360340
--- /dev/null
+++ b/chromium/media/cast/rtp_common/rtp_defines.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
+#define MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
+
+#include "base/basictypes.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "third_party/webrtc/modules/interface/module_common_types.h"
+
+namespace media {
+namespace cast {
+
+const uint8 kRtpMarkerBitMask = 0x80;
+
+struct RtpCastHeader {
+ void InitRTPVideoHeaderCast() {
+ is_key_frame = false;
+ frame_id = 0;
+ packet_id = 0;
+ max_packet_id = 0;
+ is_reference = false;
+ reference_frame_id = 0;
+ }
+ webrtc::WebRtcRTPHeader webrtc;
+ bool is_key_frame;
+ uint8 frame_id;
+ uint16 packet_id;
+ uint16 max_packet_id;
+ bool is_reference; // Set to true if the previous frame is not available,
+ // and the reference frame id is available.
+ uint8 reference_frame_id;
+};
+
+class RtpPayloadFeedback {
+ public:
+ virtual void CastFeedback(const RtcpCastMessage& cast_feedback) = 0;
+
+ protected:
+ virtual ~RtpPayloadFeedback() {}
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.cc b/chromium/media/cast/rtp_receiver/receiver_stats.cc
new file mode 100644
index 00000000000..44a9b810075
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.cc
@@ -0,0 +1,120 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/receiver_stats.h"
+
+#include "base/logging.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kMaxSequenceNumber = 65536;
+
+ReceiverStats::ReceiverStats(uint32 ssrc)
+ : ssrc_(ssrc),
+ min_sequence_number_(0),
+ max_sequence_number_(0),
+ total_number_packets_(0),
+ sequence_number_cycles_(0),
+ interval_min_sequence_number_(0),
+ interval_number_packets_(0),
+ interval_wrap_count_(0),
+ default_tick_clock_(),
+ clock_(&default_tick_clock_) {}
+
+ReceiverStats::~ReceiverStats() {}
+
+void ReceiverStats::GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost,
+ uint32* extended_high_sequence_number,
+ uint32* jitter) {
+ // Compute losses.
+ if (interval_number_packets_ == 0) {
+ *fraction_lost = 0;
+ } else {
+ int diff = 0;
+ if (interval_wrap_count_ == 0) {
+ diff = max_sequence_number_ - interval_min_sequence_number_ + 1;
+ } else {
+ diff = kMaxSequenceNumber * (interval_wrap_count_ - 1) +
+ (max_sequence_number_ - interval_min_sequence_number_ +
+ kMaxSequenceNumber + 1);
+ }
+
+ if (diff < 1) {
+ *fraction_lost = 0;
+ } else {
+ *fraction_lost = static_cast<uint8>((256 * (1 -
+ static_cast<float>(interval_number_packets_) / abs(diff))));
+ }
+ }
+
+ int expected_packets_num = max_sequence_number_ - min_sequence_number_ + 1;
+ if (total_number_packets_ == 0) {
+ *cumulative_lost = 0;
+ } else if (sequence_number_cycles_ == 0) {
+ *cumulative_lost = expected_packets_num - total_number_packets_;
+ } else {
+ *cumulative_lost = kMaxSequenceNumber * (sequence_number_cycles_ - 1) +
+ (expected_packets_num - total_number_packets_ + kMaxSequenceNumber);
+ }
+
+ // Extended high sequence number consists of the highest seq number and the
+ // number of cycles (wrap).
+ *extended_high_sequence_number = (sequence_number_cycles_ << 16) +
+ max_sequence_number_;
+
+ *jitter = static_cast<uint32>(abs(jitter_.InMilliseconds()));
+
+ // Reset interval values.
+ interval_min_sequence_number_ = 0;
+ interval_number_packets_ = 0;
+ interval_wrap_count_ = 0;
+}
+
+void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
+ if (ssrc_ != header.webrtc.header.ssrc) return;
+
+ uint16 new_seq_num = header.webrtc.header.sequenceNumber;
+
+ if (interval_number_packets_ == 0) {
+ // First packet in the interval.
+ interval_min_sequence_number_ = new_seq_num;
+ }
+ if (total_number_packets_ == 0) {
+ // First incoming packet.
+ min_sequence_number_ = new_seq_num;
+ max_sequence_number_ = new_seq_num;
+ }
+
+ if (IsNewerSequenceNumber(new_seq_num, max_sequence_number_)) {
+ // Check wrap.
+ if (new_seq_num < max_sequence_number_) {
+ ++sequence_number_cycles_;
+ ++interval_wrap_count_;
+ }
+ max_sequence_number_ = new_seq_num;
+ }
+
+ // Compute Jitter.
+ base::TimeTicks now = clock_->NowTicks();
+ base::TimeDelta delta_new_timestamp =
+ base::TimeDelta::FromMilliseconds(header.webrtc.header.timestamp);
+ if (total_number_packets_ > 0) {
+ // Update jitter.
+ base::TimeDelta delta = (now - last_received_packet_time_) -
+ ((delta_new_timestamp - last_received_timestamp_) / 90000);
+ jitter_ += (delta - jitter_) / 16;
+ }
+ last_received_timestamp_ = delta_new_timestamp;
+ last_received_packet_time_ = now;
+
+ // Increment counters.
+ ++total_number_packets_;
+ ++interval_number_packets_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.h b/chromium/media/cast/rtp_receiver/receiver_stats.h
new file mode 100644
index 00000000000..610f515c0e2
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
+#define MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
+
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class ReceiverStats {
+ public:
+ explicit ReceiverStats(uint32 ssrc);
+ ~ReceiverStats();
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter);
+ void UpdateStatistics(const RtpCastHeader& header);
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ private:
+ const uint32 ssrc_;
+
+ // Global metrics.
+ uint16 min_sequence_number_;
+ uint16 max_sequence_number_;
+ uint32 total_number_packets_;
+ uint16 sequence_number_cycles_;
+ base::TimeDelta last_received_timestamp_;
+ base::TimeTicks last_received_packet_time_;
+ base::TimeDelta jitter_;
+
+ // Intermediate metrics - between RTCP reports.
+ int interval_min_sequence_number_;
+ int interval_number_packets_;
+ int interval_wrap_count_;
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
new file mode 100644
index 00000000000..c6cf91ab072
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/receiver_stats.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+static const uint32 kStdTimeIncrementMs = 33;
+static const uint32 kSsrc = 0x1234;
+
+class ReceiverStatsTest : public ::testing::Test {
+ protected:
+ ReceiverStatsTest()
+ : stats_(kSsrc),
+ rtp_header_(),
+ fraction_lost_(0),
+ cumulative_lost_(0),
+ extended_high_sequence_number_(0),
+ jitter_(0) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ start_time_ = testing_clock_.NowTicks();
+ delta_increments_ = base::TimeDelta::FromMilliseconds(kStdTimeIncrementMs);
+ }
+ ~ReceiverStatsTest() {}
+
+ virtual void SetUp() {
+ rtp_header_.webrtc.header.sequenceNumber = 0;
+ rtp_header_.webrtc.header.timestamp = 0;
+ rtp_header_.webrtc.header.ssrc = kSsrc;
+ }
+
+ uint32 ExpectedJitter(uint32 const_interval, int num_packets) {
+ float jitter = 0;
+ // Assume timestamps have a constant kStdTimeIncrementMs interval.
+ float float_interval =
+ static_cast<float>(const_interval - kStdTimeIncrementMs);
+ for (int i = 0; i < num_packets; ++i) {
+ jitter += (float_interval - jitter) / 16;
+ }
+ return static_cast<uint32>(jitter + 0.5f);
+ }
+
+ uint32 Timestamp() {
+ base::TimeDelta delta = testing_clock_.NowTicks() - start_time_;
+ return static_cast<uint32>(delta.InMilliseconds() * 90);
+ }
+
+ ReceiverStats stats_;
+ RtpCastHeader rtp_header_;
+ uint8 fraction_lost_;
+ uint32 cumulative_lost_;
+ uint32 extended_high_sequence_number_;
+ uint32 jitter_;
+ base::SimpleTestTickClock testing_clock_;
+ base::TimeTicks start_time_;
+ base::TimeDelta delta_increments_;
+};
+
+TEST_F(ReceiverStatsTest, ResetState) {
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(0u, fraction_lost_);
+ EXPECT_EQ(0u, cumulative_lost_);
+ EXPECT_EQ(0u, extended_high_sequence_number_);
+ EXPECT_EQ(0u, jitter_);
+}
+
+TEST_F(ReceiverStatsTest, LossCount) {
+ for (int i = 0; i < 300; ++i) {
+ if (i % 4)
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ }
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(63u, fraction_lost_);
+ EXPECT_EQ(74u, cumulative_lost_);
+ // Build extended sequence number.
+ uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, NoLossWrap) {
+ rtp_header_.webrtc.header.sequenceNumber = 65500;
+ for (int i = 0; i < 300; ++i) {
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ }
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(0u, fraction_lost_);
+ EXPECT_EQ(0u, cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = (1 << 16) +
+ rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, LossCountWrap) {
+ const uint32 start_sequence_number = 65500;
+ rtp_header_.webrtc.header.sequenceNumber = start_sequence_number;
+ for (int i = 0; i < 300; ++i) {
+ if (i % 4)
+ stats_.UpdateStatistics(rtp_header_);
+ if (i % 3)
+ // Update timestamp.
+ ++rtp_header_.webrtc.header.timestamp;
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_EQ(63u, fraction_lost_);
+ EXPECT_EQ(74u, cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = (1 << 16) +
+ rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+}
+
+TEST_F(ReceiverStatsTest, Jitter) {
+ rtp_header_.webrtc.header.timestamp = Timestamp();
+ for (int i = 0; i < 300; ++i) {
+ stats_.UpdateStatistics(rtp_header_);
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ rtp_header_.webrtc.header.timestamp += 33 * 90;
+ testing_clock_.Advance(delta_increments_);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_FALSE(fraction_lost_);
+ EXPECT_FALSE(cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), jitter_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
new file mode 100644
index 00000000000..d39bc2a2559
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
+#define MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
+
+#include "media/cast/rtp_receiver/rtp_parser/rtp_feedback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtpFeedback : public RtpFeedback {
+ public:
+ MOCK_METHOD4(OnInitializeDecoder,
+ int32(const int8 payloadType,
+ const int frequency,
+ const uint8 channels,
+ const uint32 rate));
+
+ MOCK_METHOD1(OnPacketTimeout,
+ void(const int32 id));
+ MOCK_METHOD2(OnReceivedPacket,
+ void(const int32 id, const RtpRtcpPacketType packet_type));
+ MOCK_METHOD2(OnPeriodicDeadOrAlive,
+ void(const int32 id, const RTPAliveType alive));
+ MOCK_METHOD2(OnIncomingSSRCChanged,
+ void(const int32 id, const uint32 ssrc));
+ MOCK_METHOD3(OnIncomingCSRCChanged,
+ void(const int32 id, const uint32 csrc, const bool added));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_ \ No newline at end of file
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
new file mode 100644
index 00000000000..0eb691be7af
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -0,0 +1,107 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+static const int kRtpCommonHeaderLength = 12;
+static const int kRtpCastHeaderLength = 7;
+static const uint8 kCastKeyFrameBitMask = 0x80;
+static const uint8 kCastReferenceFrameIdBitMask = 0x40;
+
+RtpParser::RtpParser(RtpData* incoming_payload_callback,
+ const RtpParserConfig parser_config)
+ : data_callback_(incoming_payload_callback),
+ parser_config_(parser_config) {
+}
+
+RtpParser::~RtpParser() {
+}
+
+bool RtpParser::ParsePacket(const uint8* packet, int length,
+ RtpCastHeader* rtp_header) {
+ if (length == 0) return false;
+ // Get RTP general header.
+ if (!ParseCommon(packet, length, rtp_header)) return false;
+ if (rtp_header->webrtc.header.payloadType == parser_config_.payload_type &&
+ rtp_header->webrtc.header.ssrc == parser_config_.ssrc) {
+ return ParseCast(packet + kRtpCommonHeaderLength,
+ length - kRtpCommonHeaderLength, rtp_header);
+ }
+ // Not a valid payload type / ssrc combination.
+ return false;
+}
+
+bool RtpParser::ParseCommon(const uint8* packet,
+                            int length,
+                            RtpCastHeader* rtp_header) {
+  if (length < kRtpCommonHeaderLength) return false;
+  uint8 version = packet[0] >> 6;
+  if (version != 2) return false;
+  uint8 cc = packet[0] & 0x0f;
+  bool marker = ((packet[1] & 0x80) != 0);
+  int payload_type = packet[1] & 0x7f;
+
+  uint16 sequence_number;
+  uint32 rtp_timestamp, ssrc;
+  net::BigEndianReader big_endian_reader(packet + 2, length - 2);  // Bytes, not bits.
+  big_endian_reader.ReadU16(&sequence_number);
+  big_endian_reader.ReadU32(&rtp_timestamp);
+  big_endian_reader.ReadU32(&ssrc);
+
+  rtp_header->webrtc.header.markerBit = marker;
+  rtp_header->webrtc.header.payloadType = payload_type;
+  rtp_header->webrtc.header.sequenceNumber = sequence_number;
+  rtp_header->webrtc.header.timestamp = rtp_timestamp;
+  rtp_header->webrtc.header.ssrc = ssrc;
+  rtp_header->webrtc.header.numCSRCs = cc;
+
+  uint8 csrc_octs = cc * 4;
+  rtp_header->webrtc.type.Audio.numEnergy = rtp_header->webrtc.header.numCSRCs;
+  rtp_header->webrtc.header.headerLength = kRtpCommonHeaderLength + csrc_octs;
+  rtp_header->webrtc.type.Audio.isCNG = false;
+  rtp_header->webrtc.type.Audio.channel = parser_config_.audio_channels;
+  return true;
+}
+
+bool RtpParser::ParseCast(const uint8* packet,
+                          int length,
+                          RtpCastHeader* rtp_header) {
+  if (length < kRtpCastHeaderLength) return false;
+  // Extract header.
+  const uint8* data_ptr = packet;
+  int data_length = length;
+  rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
+  rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
+  rtp_header->frame_id = data_ptr[1];
+
+  net::BigEndianReader big_endian_reader(data_ptr + 2, length - 2);  // Bytes, not bits.
+  big_endian_reader.ReadU16(&rtp_header->packet_id);
+  big_endian_reader.ReadU16(&rtp_header->max_packet_id);
+
+  if (rtp_header->is_reference) {
+    rtp_header->reference_frame_id = data_ptr[6];
+    data_ptr += kRtpCastHeaderLength;
+    data_length -= kRtpCastHeaderLength;
+  } else {
+    data_ptr += kRtpCastHeaderLength - 1;
+    data_length -= kRtpCastHeaderLength - 1;
+  }
+
+  if (rtp_header->max_packet_id < rtp_header->packet_id) {
+    return false;
+  }
+  data_callback_->OnReceivedPayloadData(data_ptr, data_length, rtp_header);
+  return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi
new file mode 100644
index 00000000000..0814e55cf81
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi
@@ -0,0 +1,25 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtp_parser',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'rtp_parser_config.h',
+ 'rtp_parser.cc',
+ 'rtp_parser.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
new file mode 100644
index 00000000000..7f85609bf6d
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpData;
+
+struct RtpParserConfig {
+ RtpParserConfig() {
+ ssrc = 0;
+ payload_type = 0;
+ audio_channels = 0;
+ }
+
+ uint32 ssrc;
+ int payload_type;
+ AudioCodec audio_codec;
+ VideoCodec video_codec;
+ int audio_channels;
+};
+
+class RtpParser {
+ public:
+ RtpParser(RtpData* incoming_payload_callback,
+ const RtpParserConfig parser_config);
+
+ ~RtpParser();
+
+ bool ParsePacket(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ private:
+ bool ParseCommon(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ bool ParseCast(const uint8* packet, int length,
+ RtpCastHeader* rtp_header);
+
+ RtpData* data_callback_;
+ RtpParserConfig parser_config_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
new file mode 100644
index 00000000000..71e6f501a52
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+
+namespace media {
+namespace cast {
+
+static const int kPacketLength = 1500;
+static const int kCastRtpLength = 7;
+static const int kTestPayloadType = 127;
+static const uint32 kTestSsrc = 1234;
+static const uint32 kTestTimestamp = 111111;
+static const uint16 kTestSeqNum = 4321;
+static const uint8 kRefFrameId = 17;
+
+class RtpDataTest : public RtpData {
+ public:
+ RtpDataTest() {
+ expected_header_.reset(new RtpCastHeader());
+ }
+
+ ~RtpDataTest() {}
+
+ void SetExpectedHeader(const RtpCastHeader& cast_header) {
+ memcpy(expected_header_.get(), &cast_header, sizeof(RtpCastHeader));
+ }
+
+ void OnReceivedPayloadData(const uint8* payloadData,
+ int payloadSize,
+ const RtpCastHeader* rtpHeader) {
+ VerifyCommonHeader(*rtpHeader);
+ VerifyCastHeader(*rtpHeader);
+ // TODO(mikhal): Add data verification.
+ }
+
+ void VerifyCommonHeader(const RtpCastHeader& parsed_header) {
+ EXPECT_EQ(expected_header_->packet_id == expected_header_->max_packet_id,
+ parsed_header.webrtc.header.markerBit);
+ EXPECT_EQ(kTestPayloadType, parsed_header.webrtc.header.payloadType);
+ EXPECT_EQ(kTestSsrc, parsed_header.webrtc.header.ssrc);
+ EXPECT_EQ(0, parsed_header.webrtc.header.numCSRCs);
+ }
+
+ void VerifyCastHeader(const RtpCastHeader& parsed_header) {
+ EXPECT_EQ(expected_header_->is_key_frame, parsed_header.is_key_frame);
+ EXPECT_EQ(expected_header_->frame_id, parsed_header.frame_id);
+ EXPECT_EQ(expected_header_->packet_id, parsed_header.packet_id);
+ EXPECT_EQ(expected_header_->max_packet_id, parsed_header.max_packet_id);
+ EXPECT_EQ(expected_header_->is_reference, parsed_header.is_reference);
+ }
+
+ private:
+ scoped_ptr<RtpCastHeader> expected_header_;
+};
+
+class RtpParserTest : public ::testing::Test {
+ protected:
+ RtpParserTest() {
+ PopulateConfig();
+ rtp_data_.reset(new RtpDataTest());
+ rtp_parser_.reset(new RtpParser(rtp_data_.get(), config_));
+ }
+
+ ~RtpParserTest() {}
+
+ virtual void SetUp() {
+ cast_header_.InitRTPVideoHeaderCast();
+ cast_header_.is_reference = true;
+ cast_header_.reference_frame_id = kRefFrameId;
+ packet_builder_.SetSsrc(kTestSsrc);
+ packet_builder_.SetReferenceFrameId(kRefFrameId, true);
+ packet_builder_.SetSequenceNumber(kTestSeqNum);
+ packet_builder_.SetTimestamp(kTestTimestamp);
+ packet_builder_.SetPayloadType(kTestPayloadType);
+ packet_builder_.SetMarkerBit(true); // Only one packet.
+ }
+
+ void PopulateConfig() {
+ config_.payload_type = kTestPayloadType;
+ config_.ssrc = kTestSsrc;
+ }
+
+ scoped_ptr<RtpDataTest> rtp_data_;
+ RtpPacketBuilder packet_builder_;
+ scoped_ptr<RtpParser> rtp_parser_;
+ RtpParserConfig config_;
+ RtpCastHeader cast_header_;
+};
+
+TEST_F(RtpParserTest, ParseDefaultCastPacket) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, ParseNonDefaultCastPacket) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(5);
+ packet_builder_.SetMaxPacketId(15);
+ packet_builder_.SetMarkerBit(false);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.packet_id = 5;
+ cast_header_.max_packet_id = 15;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, TooBigPacketId) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(15);
+ packet_builder_.SetMaxPacketId(5);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, MaxPacketId) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.packet_id = 65535;
+ cast_header_.max_packet_id = 65535;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, InvalidPayloadType) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.SetPayloadType(kTestPayloadType - 1);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, InvalidSsrc) {
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.SetKeyFrame(true);
+ packet_builder_.SetFrameId(10);
+ packet_builder_.SetPacketId(65535);
+ packet_builder_.SetMaxPacketId(65535);
+ packet_builder_.SetSsrc(kTestSsrc - 1);
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+TEST_F(RtpParserTest, ParseCastPacketWithoutReference) {
+ cast_header_.is_reference = false;
+ cast_header_.reference_frame_id = 0;
+ packet_builder_.SetReferenceFrameId(kRefFrameId, false);
+
+ // Build generic data packet.
+ uint8 packet[kPacketLength];
+ packet_builder_.BuildHeader(packet, kPacketLength);
+ // Parse packet as is.
+ RtpCastHeader rtp_header;
+ rtp_data_->SetExpectedHeader(cast_header_);
+ EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.cc b/chromium/media/cast/rtp_receiver/rtp_receiver.cc
new file mode 100644
index 00000000000..97e9b03032c
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.cc
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+
+#include "base/logging.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/receiver_stats.h"
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+
+namespace media {
+namespace cast {
+
+RtpReceiver::RtpReceiver(const AudioReceiverConfig* audio_config,
+ const VideoReceiverConfig* video_config,
+ RtpData* incoming_payload_callback) {
+ DCHECK(incoming_payload_callback) << "Invalid argument";
+ DCHECK(audio_config || video_config) << "Invalid argument";
+ // Configure parser.
+ RtpParserConfig config;
+ if (audio_config) {
+ config.ssrc = audio_config->incoming_ssrc;
+ config.payload_type = audio_config->rtp_payload_type;
+ config.audio_codec = audio_config->codec;
+ config.audio_channels = audio_config->channels;
+ } else {
+ config.ssrc = video_config->incoming_ssrc;
+ config.payload_type = video_config->rtp_payload_type;
+ config.video_codec = video_config->codec;
+ }
+ stats_.reset(new ReceiverStats(config.ssrc));
+ parser_.reset(new RtpParser(incoming_payload_callback, config));
+}
+
+RtpReceiver::~RtpReceiver() {}
+
+bool RtpReceiver::ReceivedPacket(const uint8* packet, int length) {
+ RtpCastHeader rtp_header;
+ if (!parser_->ParsePacket(packet, length, &rtp_header)) return false;
+
+ stats_->UpdateStatistics(rtp_header);
+ return true;
+}
+
+void RtpReceiver::GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost,
+ uint32* extended_high_sequence_number,
+ uint32* jitter) {
+ stats_->GetStatistics(fraction_lost,
+ cumulative_lost,
+ extended_high_sequence_number,
+ jitter);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.gyp b/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
new file mode 100644
index 00000000000..c1d4d5adf05
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtp_receiver',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'receiver_stats.cc',
+ 'receiver_stats.h',
+ 'rtp_receiver.cc',
+ 'rtp_receiver.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ 'rtp_parser/rtp_parser.gypi:*',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.h b/chromium/media/cast/rtp_receiver/rtp_receiver.h
new file mode 100644
index 00000000000..6cac8cadd70
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Interface to the rtp receiver.
+
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+class RtpData {
+ public:
+ virtual void OnReceivedPayloadData(const uint8* payload_data,
+ int payload_size,
+ const RtpCastHeader* rtp_header) = 0;
+
+ protected:
+ virtual ~RtpData() {}
+};
+
+class ReceiverStats;
+class RtpParser;
+
+class RtpReceiver {
+ public:
+ RtpReceiver(const AudioReceiverConfig* audio_config,
+ const VideoReceiverConfig* video_config,
+ RtpData* incoming_payload_callback);
+ ~RtpReceiver();
+
+ bool ReceivedPacket(const uint8* packet, int length);
+
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter);
+
+ private:
+ scoped_ptr<ReceiverStats> stats_;
+ scoped_ptr<RtpParser> parser_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
diff --git a/chromium/media/cast/rtp_sender/mock_rtp_sender.h b/chromium/media/cast/rtp_sender/mock_rtp_sender.h
new file mode 100644
index 00000000000..334bc885db5
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/mock_rtp_sender.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
+#define MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
+
+#include <vector>
+
+#include "media/cast/rtp_sender/rtp_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockRtpSender : public RtpSender {
+ public:
+ MOCK_METHOD2(IncomingEncodedVideoFrame,
+ bool(const EncodedVideoFrame& frame, int64 capture_time));
+
+ MOCK_METHOD2(IncomingEncodedAudioFrame,
+ bool(const EncodedAudioFrame& frame, int64 recorded_time));
+
+ MOCK_METHOD3(ResendPacket,
+ bool(bool is_audio, uint8 frame_id, uint16 packet_id));
+
+ MOCK_METHOD0(RtpStatistics, void());
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
+
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc
new file mode 100644
index 00000000000..9c2d7ff0884
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc
@@ -0,0 +1,141 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+
+#include <string>
+
+#include "base/logging.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
+
+namespace media {
+namespace cast {
+
+// Limit the max time delay to avoid frame id wrap around; 256 / 60 fps.
+const int kMaxAllowedTimeStoredMs = 4000;
+
+typedef PacketMap::iterator PacketMapIterator;
+typedef TimeToPacketMap::iterator TimeToPacketIterator;
+
+class StoredPacket {
+ public:
+ StoredPacket() {
+ packet_.reserve(kIpPacketSize);
+ }
+
+ void Save(const std::vector<uint8>& packet) {
+ DCHECK_LT(packet.size(), kIpPacketSize) << "Invalid argument";
+ packet_.clear();
+ packet_.insert(packet_.begin(), packet.begin(), packet.end());
+ }
+
+ void GetCopy(std::vector<uint8>* packet) {
+ packet->insert(packet->begin(), packet_.begin(), packet_.end());
+ }
+
+ private:
+ std::vector<uint8> packet_;
+};
+
+
+PacketStorage::PacketStorage(int max_time_stored_ms)
+ : default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()) {
+ max_time_stored_ = base::TimeDelta::FromMilliseconds(max_time_stored_ms);
+ DCHECK_LE(max_time_stored_ms, kMaxAllowedTimeStoredMs) << "Invalid argument";
+}
+
+PacketStorage::~PacketStorage() {
+ time_to_packet_map_.clear();
+
+ PacketMapIterator store_it = stored_packets_.begin();
+ for (; store_it != stored_packets_.end();
+ store_it = stored_packets_.begin()) {
+ stored_packets_.erase(store_it);
+ }
+ while (!free_packets_.empty()) {
+ free_packets_.pop_front();
+ }
+}
+
+void PacketStorage::CleanupOldPackets(base::TimeTicks now) {
+ TimeToPacketIterator time_it = time_to_packet_map_.begin();
+
+ // Check max size.
+ while (time_to_packet_map_.size() >= kMaxStoredPackets) {
+ PacketMapIterator store_it = stored_packets_.find(time_it->second);
+
+ // We should always find the packet.
+ DCHECK(store_it != stored_packets_.end()) << "Invalid state";
+ time_to_packet_map_.erase(time_it);
+ // Save the pointer.
+ linked_ptr<StoredPacket> storted_packet = store_it->second;
+ stored_packets_.erase(store_it);
+ // Add this packet to the free list for later re-use.
+ free_packets_.push_back(storted_packet);
+ time_it = time_to_packet_map_.begin();
+ }
+
+ // Time out old packets.
+ while (time_it != time_to_packet_map_.end()) {
+ if (now < time_it->first + max_time_stored_) {
+ break;
+ }
+ // Packet too old.
+ PacketMapIterator store_it = stored_packets_.find(time_it->second);
+
+ // We should always find the packet.
+ DCHECK(store_it != stored_packets_.end()) << "Invalid state";
+ time_to_packet_map_.erase(time_it);
+ // Save the pointer.
+ linked_ptr<StoredPacket> storted_packet = store_it->second;
+ stored_packets_.erase(store_it);
+ // Add this packet to the free list for later re-use.
+ free_packets_.push_back(storted_packet);
+ time_it = time_to_packet_map_.begin();
+ }
+}
+
+void PacketStorage::StorePacket(uint8 frame_id,
+ uint16 packet_id,
+ const std::vector<uint8>& packet) {
+ base::TimeTicks now = clock_->NowTicks();
+ CleanupOldPackets(now);
+
+ uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
+ PacketMapIterator it = stored_packets_.find(index);
+ if (it != stored_packets_.end()) {
+ // We have already saved this.
+ DCHECK(false) << "Invalid state";
+ return;
+ }
+ linked_ptr<StoredPacket> stored_packet;
+ if (free_packets_.empty()) {
+ // No previous allocated packets allocate one.
+ stored_packet.reset(new StoredPacket());
+ } else {
+ // Re-use previous allocated packet.
+ stored_packet = free_packets_.front();
+ free_packets_.pop_front();
+ }
+ stored_packet->Save(packet);
+ stored_packets_[index] = stored_packet;
+ time_to_packet_map_.insert(std::make_pair(now, index));
+}
+
+bool PacketStorage::GetPacket(uint8 frame_id,
+ uint16 packet_id,
+ std::vector<uint8>* packet) {
+ uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
+ PacketMapIterator it = stored_packets_.find(index);
+ if (it == stored_packets_.end()) {
+ return false;
+ }
+ it->second->GetCopy(packet);
+ return true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi
new file mode 100644
index 00000000000..f691d9e9b69
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi
@@ -0,0 +1,23 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'packet_storage',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'packet_storage.h',
+ 'packet_storage.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ ],
+ },
+ ],
+}
+
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.h b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.h
new file mode 100644
index 00000000000..e1e3bcbe121
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/packet_storage/packet_storage.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
+#define MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
+
+#include <list>
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+class StoredPacket;
+typedef std::map<uint32, linked_ptr<StoredPacket> > PacketMap;
+typedef std::multimap<base::TimeTicks, uint32> TimeToPacketMap;
+
+class PacketStorage {
+ public:
+ static const int kMaxStoredPackets = 1000;
+
+ explicit PacketStorage(int max_time_stored_ms);
+ virtual ~PacketStorage();
+
+ void StorePacket(uint8 frame_id,
+ uint16 packet_id,
+ const std::vector<uint8>& packet);
+
+ // Copies packet into the buffer pointed to by rtp_buffer.
+ bool GetPacket(uint8 frame_id,
+ uint16 packet_id,
+ std::vector<uint8>* packet);
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ }
+
+ private:
+ void CleanupOldPackets(base::TimeTicks now);
+
+ base::TimeDelta max_time_stored_;
+ PacketMap stored_packets_;
+ TimeToPacketMap time_to_packet_map_;
+ std::list<linked_ptr<StoredPacket> > free_packets_;
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc b/chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
new file mode 100644
index 00000000000..d6de08d4866
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
@@ -0,0 +1,114 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+static const int kMaxDeltaStoredMs = 500;
+static const base::TimeDelta kDeltaBetweenFrames =
+ base::TimeDelta::FromMilliseconds(33);
+
+static const int64 kStartMillisecond = 123456789;
+
+class PacketStorageTest : public ::testing::Test {
+ protected:
+ PacketStorageTest() : packet_storage_(kMaxDeltaStoredMs) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ packet_storage_.set_clock(&testing_clock_);
+ }
+
+ PacketStorage packet_storage_;
+ base::SimpleTestTickClock testing_clock_;
+};
+
+TEST_F(PacketStorageTest, TimeOut) {
+ std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
+
+ for (uint8 frame_id = 0; frame_id < 30; ++frame_id) {
+ for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
+ packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ }
+ testing_clock_.Advance(kDeltaBetweenFrames);
+ }
+
+ // All packets belonging to the first 14 frames is expected to be expired.
+ for (uint8 frame_id = 0; frame_id < 14; ++frame_id) {
+ for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
+ std::vector<uint8> packet;
+ EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ }
+ }
+ // All packets belonging to the next 15 frames is expected to be valid.
+ for (uint8 frame_id = 14; frame_id < 30; ++frame_id) {
+ for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
+ std::vector<uint8> packet;
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ EXPECT_TRUE(packet == test_123);
+ }
+ }
+}
+
+TEST_F(PacketStorageTest, MaxNumberOfPackets) {
+ std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
+
+ uint8 frame_id = 0;
+ for (uint16 packet_id = 0; packet_id <= PacketStorage::kMaxStoredPackets;
+ ++packet_id) {
+ packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ }
+ std::vector<uint8> packet;
+ uint16 packet_id = 0;
+ EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+
+ ++packet_id;
+ for (; packet_id <= PacketStorage::kMaxStoredPackets; ++packet_id) {
+ std::vector<uint8> packet;
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ EXPECT_TRUE(packet == test_123);
+ }
+}
+
+TEST_F(PacketStorageTest, PacketContent) {
+ std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
+ std::vector<uint8> test_234(200, 234); // 200 insertions of the value 234.
+
+ for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
+ // Every other packet.
+ if (packet_id % 2 == 0) {
+ packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ } else {
+ packet_storage_.StorePacket(frame_id, packet_id, test_234);
+ }
+ }
+ testing_clock_.Advance(kDeltaBetweenFrames);
+ }
+ for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
+ std::vector<uint8> packet;
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ // Every other packet.
+ if (packet_id % 2 == 0) {
+ EXPECT_TRUE(packet == test_123);
+ } else {
+ EXPECT_TRUE(packet == test_234);
+ }
+ }
+ }
+}
+
+} // namespace cast
+} // namespace media
+
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
new file mode 100644
index 00000000000..6900bc24b38
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -0,0 +1,147 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+static const uint16 kCommonRtpHeaderLength = 12;
+static const uint16 kCastRtpHeaderLength = 7;
+static const uint8 kCastKeyFrameBitMask = 0x80;
+static const uint8 kCastReferenceFrameIdBitMask = 0x40;
+
+RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
+ PacketStorage* packet_storage,
+ RtpPacketizerConfig rtp_packetizer_config)
+ : config_(rtp_packetizer_config),
+ transport_(transport),
+ packet_storage_(packet_storage),
+ sequence_number_(config_.sequence_number),
+ rtp_timestamp_(config_.rtp_timestamp),
+ frame_id_(0),
+ packet_id_(0),
+ send_packets_count_(0),
+ send_octet_count_(0) {
+ DCHECK(transport) << "Invalid argument";
+}
+
+RtpPacketizer::~RtpPacketizer() {}
+
+void RtpPacketizer::IncomingEncodedVideoFrame(
+ const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
+ DCHECK(!config_.audio) << "Invalid state";
+ if (config_.audio) return;
+
+ base::TimeTicks zero_time;
+ base::TimeDelta capture_delta = capture_time - zero_time;
+
+ // Timestamp is in 90 KHz for video.
+ rtp_timestamp_ = static_cast<uint32>(capture_delta.InMilliseconds() * 90);
+ time_last_sent_rtp_timestamp_ = capture_time;
+
+ Cast(video_frame->key_frame,
+ video_frame->last_referenced_frame_id,
+ rtp_timestamp_,
+ video_frame->data);
+}
+
+void RtpPacketizer::IncomingEncodedAudioFrame(
+ const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
+ DCHECK(config_.audio) << "Invalid state";
+ if (!config_.audio) return;
+
+ rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
+ time_last_sent_rtp_timestamp_ = recorded_time;
+ Cast(true, 0, rtp_timestamp_, audio_frame->data);
+}
+
+uint16 RtpPacketizer::NextSequenceNumber() {
+ ++sequence_number_;
+ return sequence_number_ - 1;
+}
+
+bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
+ uint32* rtp_timestamp) const {
+ if (time_last_sent_rtp_timestamp_.is_null()) return false;
+
+ *time_sent = time_last_sent_rtp_timestamp_;
+ *rtp_timestamp = rtp_timestamp_;
+ return true;
+}
+
+void RtpPacketizer::Cast(bool is_key,
+ uint8 reference_frame_id,
+ uint32 timestamp,
+ std::vector<uint8> data) {
+ uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
+ uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
+ // Split the payload evenly (round number up).
+ uint32 num_packets = (data.size() + max_length) / max_length;
+ uint32 payload_length = (data.size() + num_packets) / num_packets;
+ DCHECK_LE(payload_length, max_length) << "Invalid argument";
+
+ std::vector<uint8> packet;
+ packet.reserve(kIpPacketSize);
+ size_t remaining_size = data.size();
+ uint8* data_ptr = data.data();
+ while (remaining_size > 0) {
+ packet.clear();
+ if (remaining_size < payload_length) {
+ payload_length = remaining_size;
+ }
+ remaining_size -= payload_length;
+ BuildCommonRTPheader(&packet, remaining_size == 0, timestamp);
+ // Build Cast header.
+ packet.push_back(
+ (is_key ? kCastKeyFrameBitMask : 0) | kCastReferenceFrameIdBitMask);
+ packet.push_back(frame_id_);
+ int start_size = packet.size();
+ packet.resize(start_size + 32);
+ net::BigEndianWriter big_endian_writer(&((packet)[start_size]), 32);
+ big_endian_writer.WriteU16(packet_id_);
+ big_endian_writer.WriteU16(num_packets - 1);
+ packet.push_back(reference_frame_id);
+
+ // Copy payload data.
+ packet.insert(packet.end(), data_ptr, data_ptr + payload_length);
+ // Store packet.
+ packet_storage_->StorePacket(frame_id_, packet_id_, packet);
+ // Send to network.
+ transport_->SendPacket(packet, num_packets);
+ ++packet_id_;
+ data_ptr += payload_length;
+ // Update stats.
+ ++send_packets_count_;
+ send_octet_count_ += payload_length;
+ }
+ DCHECK(packet_id_ == num_packets) << "Invalid state";
+ // Prepare for next frame.
+ packet_id_ = 0;
+ frame_id_ = static_cast<uint8>(frame_id_ + 1);
+}
+
+void RtpPacketizer::BuildCommonRTPheader(
+ std::vector<uint8>* packet, bool marker_bit, uint32 time_stamp) {
+ packet->push_back(0x80);
+ packet->push_back(static_cast<uint8>(config_.payload_type) |
+ (marker_bit ? kRtpMarkerBitMask : 0));
+ int start_size = packet->size();
+ packet->resize(start_size + 80);
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 80);
+ big_endian_writer.WriteU16(sequence_number_);
+ big_endian_writer.WriteU32(time_stamp);
+ big_endian_writer.WriteU32(config_.ssrc);
+ ++sequence_number_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi
new file mode 100644
index 00000000000..09ceb3b6354
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi
@@ -0,0 +1,25 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_rtp_packetizer',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
+ 'sources': [
+ 'rtp_packetizer.cc',
+ 'rtp_packetizer.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/net/net.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
new file mode 100644
index 00000000000..63035d098db
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#define MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+
+#include <cmath>
+#include <list>
+#include <map>
+
+#include "base/time/time.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
+
+namespace media {
+namespace cast {
+
+class PacedPacketSender;
+
+// This object is only called from the main cast thread.
+// This class break encoded audio and video frames into packets and add an RTP
+// header to each packet.
+class RtpPacketizer {
+ public:
+ RtpPacketizer(PacedPacketSender* transport,
+ PacketStorage* packet_storage,
+ RtpPacketizerConfig rtp_packetizer_config);
+ ~RtpPacketizer();
+
+ // The video_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
+
+ // The audio_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
+
+ bool LastSentTimestamp(base::TimeTicks* time_sent,
+ uint32* rtp_timestamp) const;
+
+ // Return the next sequence number, and increment by one. Enables unique
+ // incremental sequence numbers for every packet (including retransmissions).
+ uint16 NextSequenceNumber();
+
+ int send_packets_count() { return send_packets_count_; }
+ int send_octet_count() { return send_octet_count_; }
+
+ private:
+ void Cast(bool is_key, uint8 reference_frame_id,
+ uint32 timestamp, std::vector<uint8> data);
+ void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
+ uint32 time_stamp);
+
+ RtpPacketizerConfig config_;
+ PacedPacketSender* transport_;
+ PacketStorage* packet_storage_;
+
+ base::TimeTicks time_last_sent_rtp_timestamp_;
+ uint16 sequence_number_;
+ uint32 rtp_timestamp_;
+ uint8 frame_id_;
+ uint16 packet_id_;
+
+ int send_packets_count_;
+ int send_octet_count_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
new file mode 100644
index 00000000000..cd005d53c85
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
+#define CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
+
+#include "media/cast/cast_config.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+
+namespace media {
+namespace cast {
+
+// Settings consumed by RtpPacketizer. The defaults describe a video-less
+// 8 kHz stream over IPv4/UDP; the owner (see RtpSender's constructor) is
+// expected to fill in ssrc, payload type and the codec fields before use.
+struct RtpPacketizerConfig {
+  RtpPacketizerConfig() {
+    ssrc = 0;
+    max_payload_length = kIpPacketSize - 28;  // Default is IP-v4/UDP.
+    audio = false;
+    frequency = 8000;
+    payload_type = -1;  // Invalid until explicitly configured.
+    sequence_number = 0;
+    rtp_timestamp = 0;
+    // Initialize the remaining POD member so a default-constructed config
+    // never carries an indeterminate value. The codec enums
+    // (audio_codec/video_codec) are left for the owner to set.
+    channels = 0;
+  }
+
+  // General.
+  bool audio;
+  int payload_type;
+  uint16 max_payload_length;
+  uint16 sequence_number;
+  uint32 rtp_timestamp;
+  int frequency;
+
+  // SSRC.
+  unsigned int ssrc;
+
+  // Video.
+  VideoCodec video_codec;
+
+  // Audio.
+  uint8 channels;
+  AudioCodec audio_codec;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
new file mode 100644
index 00000000000..bed7cba2e8f
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+
+#include <gtest/gtest.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
+
+namespace media {
+namespace cast {
+
+// Fixture constants for the packetizer tests below.
+static const int kPayload = 127;
+static const uint32 kTimestampMs = 10;
+static const uint16 kSeqNum = 33;
+// NOTE(review): kTimeOffset, kMarkerBit and kTotalHeaderLength are not
+// referenced by any test visible in this file — confirm they are needed.
+static const int kTimeOffset = 22222;
+static const int kMaxPacketLength = 1500;
+static const bool kMarkerBit = true;
+static const int kSsrc = 0x12345;
+static const uint8 kFrameId = 1;
+static const unsigned int kFrameSize = 5000;
+static const int kTotalHeaderLength = 19;
+static const int kMaxPacketStorageTimeMs = 300;
+
+// Fake PacedPacketSender that parses every packet handed to it and asserts
+// on the RTP header fields; retransmission and RTCP paths are expected to
+// stay unused in these tests.
+class TestRtpPacketTransport : public PacedPacketSender {
+ public:
+ explicit TestRtpPacketTransport(RtpPacketizerConfig config)
+ : config_(config),
+ sequence_number_(kSeqNum),
+ packets_sent_(0),
+ expected_number_of_packets_(0) {}
+
+ void VerifyRtpHeader(const RtpCastHeader& rtp_header) {
+ VerifyCommonRtpHeader(rtp_header);
+ VerifyCastRtpHeader(rtp_header);
+ }
+
+ void VerifyCommonRtpHeader(const RtpCastHeader& rtp_header) {
+ // The marker bit must be set on (only) the last packet of the frame.
+ EXPECT_EQ(expected_number_of_packets_ == packets_sent_,
+ rtp_header.webrtc.header.markerBit);
+ EXPECT_EQ(kPayload, rtp_header.webrtc.header.payloadType);
+ EXPECT_EQ(sequence_number_, rtp_header.webrtc.header.sequenceNumber);
+ // kTimestampMs * 90 converts milliseconds to RTP ticks — presumably a
+ // 90 kHz video clock; confirm against kVideoFrequency.
+ EXPECT_EQ(kTimestampMs * 90, rtp_header.webrtc.header.timestamp);
+ EXPECT_EQ(config_.ssrc, rtp_header.webrtc.header.ssrc);
+ EXPECT_EQ(0, rtp_header.webrtc.header.numCSRCs);
+ }
+
+ void VerifyCastRtpHeader(const RtpCastHeader& rtp_header) {
+ // TODO(mikhal)
+ }
+
+ // Called by the packetizer for every outgoing packet; parses the wire
+ // bytes back into a header and verifies them.
+ virtual bool SendPacket(const std::vector<uint8>& packet,
+ int num_packets) OVERRIDE {
+ EXPECT_EQ(expected_number_of_packets_, num_packets);
+ ++packets_sent_;
+ RtpHeaderParser parser(packet.data(), packet.size());
+ RtpCastHeader rtp_header;
+ parser.Parse(&rtp_header);
+ VerifyRtpHeader(rtp_header);
+ ++sequence_number_;
+ return true;
+ }
+
+ // These tests never retransmit; fail loudly if a resend is attempted.
+ virtual bool ResendPacket(const std::vector<uint8>& packet,
+ int num_of_packets) OVERRIDE {
+ EXPECT_TRUE(false);
+ return false;
+ }
+
+ // No RTCP traffic is expected from the packetizer.
+ virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
+ EXPECT_TRUE(false);
+ return false;
+ }
+
+ void SetExpectedNumberOfPackets(int num) {
+ expected_number_of_packets_ = num;
+ }
+
+ RtpPacketizerConfig config_;
+ uint32 sequence_number_;
+ int packets_sent_;
+ int expected_number_of_packets_;
+};
+
+// Test fixture: wires a packetizer to the fake transport above and prepares
+// one delta video frame of kFrameSize bytes.
+class RtpPacketizerTest : public ::testing::Test {
+ protected:
+ RtpPacketizerTest()
+ :video_frame_(),
+ packet_storage_(kMaxPacketStorageTimeMs) {
+ config_.sequence_number = kSeqNum;
+ config_.ssrc = kSsrc;
+ config_.payload_type = kPayload;
+ config_.max_payload_length = kMaxPacketLength;
+ transport_.reset(new TestRtpPacketTransport(config_));
+ rtp_packetizer_.reset(
+ new RtpPacketizer(transport_.get(), &packet_storage_, config_));
+ }
+
+ ~RtpPacketizerTest() {}
+
+ // Fills the test frame: a non-key frame referencing the previous frame id,
+ // with kFrameSize bytes of the repeated value 123.
+ void SetUp() {
+ video_frame_.key_frame = false;
+ video_frame_.frame_id = kFrameId;
+ video_frame_.last_referenced_frame_id = kFrameId - 1;
+ video_frame_.data.assign(kFrameSize, 123);
+ }
+
+ scoped_ptr<RtpPacketizer> rtp_packetizer_;
+ RtpPacketizerConfig config_;
+ scoped_ptr<TestRtpPacketTransport> transport_;
+ EncodedVideoFrame video_frame_;
+ PacketStorage packet_storage_;
+};
+
+TEST_F(RtpPacketizerTest, SendStandardPackets) {
+ // Integer division rounds down, +1 accounts for the final partial packet.
+ int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
+
+ base::TimeTicks time;
+ time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+ // Header verification happens inside TestRtpPacketTransport::SendPacket.
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,time);
+}
+
+TEST_F(RtpPacketizerTest, Stats) {
+  // Nothing sent yet: both counters must start at zero.
+  EXPECT_FALSE(rtp_packetizer_->send_packets_count());
+  EXPECT_FALSE(rtp_packetizer_->send_octet_count());
+  // Insert packets at varying lengths.
+  // Use a signed int so EXPECT_EQ compares like types with the int return
+  // values of send_packets_count()/send_octet_count(); the previous
+  // unsigned int triggered signed/unsigned comparison warnings.
+  int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+  transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
+
+  base::TimeTicks time;
+  time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+  rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
+  EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
+  EXPECT_EQ(static_cast<int>(kFrameSize),
+            rtp_packetizer_->send_octet_count());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.cc b/chromium/media/cast/rtp_sender/rtp_sender.cc
new file mode 100644
index 00000000000..ecaae40dd7a
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_sender.cc
@@ -0,0 +1,147 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+
+namespace media {
+namespace cast {
+
+// Builds the packetizer configuration from exactly one of the two sender
+// configs; at least one must be non-NULL, and audio takes precedence when
+// both are supplied.
+RtpSender::RtpSender(const AudioSenderConfig* audio_config,
+ const VideoSenderConfig* video_config,
+ PacedPacketSender* transport)
+ : config_(),
+ transport_(transport),
+ default_tick_clock_(new base::DefaultTickClock()),
+ clock_(default_tick_clock_.get()) {
+ // Store generic cast config and create packetizer config.
+ DCHECK(audio_config || video_config) << "Invalid argument";
+ if (audio_config) {
+ storage_.reset(new PacketStorage(audio_config->rtp_history_ms));
+ config_.audio = true;
+ config_.ssrc = audio_config->sender_ssrc;
+ config_.payload_type = audio_config->rtp_payload_type;
+ config_.frequency = audio_config->frequency;
+ config_.audio_codec = audio_config->codec;
+ } else {
+ storage_.reset(new PacketStorage(video_config->rtp_history_ms));
+ config_.audio = false;
+ config_.ssrc = video_config->sender_ssrc;
+ config_.payload_type = video_config->rtp_payload_type;
+ config_.frequency = kVideoFrequency;
+ config_.video_codec = video_config->codec;
+ }
+ // Randomly set start values.
+ config_.sequence_number = base::RandInt(0, 65535);
+ // Compose a random 32-bit RTP timestamp from two 16-bit draws.
+ config_.rtp_timestamp = base::RandInt(0, 65535);
+ config_.rtp_timestamp += base::RandInt(0, 65535) << 16;
+ packetizer_.reset(new RtpPacketizer(transport, storage_.get(), config_));
+}
+
+RtpSender::~RtpSender() {}
+
+// Thin wrappers: frames are handed straight to the packetizer.
+void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
+ packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
+}
+
+void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
+ packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
+}
+
+// Re-sends stored packets requested by the receiver. An empty packet set for
+// a frame means "resend the whole frame"; otherwise only the listed packet
+// ids are fetched from storage and re-sent with fresh sequence numbers.
+void RtpSender::ResendPackets(
+ const MissingFramesAndPackets& missing_frames_and_packets) {
+ std::vector<uint8> packet;
+ // Iterate over all frames in the list.
+ for (std::map<uint8, std::set<uint16> >::const_iterator it =
+ missing_frames_and_packets.begin();
+ it != missing_frames_and_packets.end(); ++it) {
+ uint8 frame_id = it->first;
+ // Iterate over all of the packets in the frame.
+ const std::set<uint16>& packets = it->second;
+ if (packets.empty()) {
+ VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
+
+ bool success = false;
+ uint16 packet_id = 0;
+ do {
+ // Get packet from storage; the loop ends at the first id that is no
+ // longer stored.
+ packet.clear();
+ success = storage_->GetPacket(frame_id, packet_id, &packet);
+
+ // Resend packet to the network.
+ if (success) {
+ // NOTE(review): this logs packets.size(), which is always 0 in this
+ // branch, and also passes it to ResendPacket below — probably
+ // packet.size() / the frame's packet count was intended; verify.
+ VLOG(1) << "Resend " << static_cast<int>(frame_id) << ":"
+ << packet_id << " size: " << packets.size();
+ // Set a unique incremental sequence number for every packet.
+ UpdateSequenceNumber(&packet);
+ // Set the size as correspond to each frame.
+ transport_->ResendPacket(packet, packets.size());
+ ++packet_id;
+ }
+ } while (success);
+
+ } else {
+ for (std::set<uint16>::const_iterator set_it = packets.begin();
+ set_it != packets.end(); ++set_it) {
+ uint16 packet_id = *set_it;
+ // Get packet from storage.
+ packet.clear();
+ bool success = storage_->GetPacket(frame_id, packet_id, &packet);
+ // Resend packet to the network.
+ if (success) {
+ VLOG(1) << "Resend " << static_cast<int>(frame_id) << ":"
+ << packet_id << " size: " << packet.size();
+ UpdateSequenceNumber(&packet);
+ // Set the size as correspond to each frame.
+ transport_->ResendPacket(packet, packets.size());
+ } else {
+ // Packet may have aged out of storage; log and continue.
+ VLOG(1) << "Failed to resend " << static_cast<int>(frame_id) << ":"
+ << packet_id;
+ }
+ }
+ }
+ }
+}
+
+// Overwrites the sequence-number field of an already-built RTP packet so
+// retransmissions carry unique, incrementing sequence numbers.
+void RtpSender::UpdateSequenceNumber(std::vector<uint8>* packet) {
+  uint16 new_sequence_number = packetizer_->NextSequenceNumber();
+  // The sequence number occupies bytes 2-3 of the RTP header and, per
+  // RFC 3550, is transmitted in network (big-endian) byte order. The
+  // previous code wrote the low byte first, corrupting every resent
+  // packet's sequence number on the wire.
+  int index = 2;
+  (*packet)[index] = static_cast<uint8>(new_sequence_number >> 8);
+  (*packet)[index + 1] = static_cast<uint8>(new_sequence_number);
+}
+
+// Fills |sender_info| with the NTP capture time, an extrapolated RTP
+// timestamp, and the packet/octet counters for an RTCP sender report.
+void RtpSender::RtpStatistics(const base::TimeTicks& now,
+                              RtcpSenderInfo* sender_info) {
+  // The timestamp of this Rtcp packet should be estimated as the timestamp of
+  // the frame being captured at this moment. We are calculating that
+  // timestamp as the last frame's timestamp + the time since the last frame
+  // was captured.
+  uint32 ntp_seconds = 0;
+  uint32 ntp_fraction = 0;
+  ConvertTimeToNtp(now, &ntp_seconds, &ntp_fraction);
+  // Report both halves of the NTP timestamp; previously the seconds part
+  // was computed but the assignment was commented out, leaving
+  // sender_info->ntp_seconds unset.
+  sender_info->ntp_seconds = ntp_seconds;
+  sender_info->ntp_fraction = ntp_fraction;
+
+  base::TimeTicks time_sent;
+  uint32 rtp_timestamp;
+  if (packetizer_->LastSentTimestamp(&time_sent, &rtp_timestamp)) {
+    // Extrapolate using the media clock rate (frequency is in ticks/second,
+    // so frequency / 1000 is ticks per millisecond).
+    base::TimeDelta time_since_last_send = now - time_sent;
+    sender_info->rtp_timestamp = rtp_timestamp +
+        time_since_last_send.InMilliseconds() * (config_.frequency / 1000);
+  } else {
+    sender_info->rtp_timestamp = 0;  // Nothing has been sent yet.
+  }
+  sender_info->send_packet_count = packetizer_->send_packets_count();
+  sender_info->send_octet_count = packetizer_->send_octet_count();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.gyp b/chromium/media/cast/rtp_sender/rtp_sender.gyp
new file mode 100644
index 00000000000..77722c9d381
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_sender.gyp
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ # Static library wrapping packet storage and the RTP packetizer behind
+ # the RtpSender interface (rtp_sender.h).
+ 'target_name': 'cast_rtp_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
+ 'sources': [
+ 'rtp_sender.cc',
+ 'rtp_sender.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ 'packet_storage/packet_storage.gypi:*',
+ 'rtp_packetizer/rtp_packetizer.gypi:*',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.h b/chromium/media/cast/rtp_sender/rtp_sender.h
new file mode 100644
index 00000000000..f6e59acba84
--- /dev/null
+++ b/chromium/media/cast/rtp_sender/rtp_sender.h
@@ -0,0 +1,74 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the interface to the cast RTP sender.
+
+#ifndef MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
+#define MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
+
+#include <map>
+#include <set>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
+
+namespace media {
+namespace cast {
+
+class PacedPacketSender;
+struct RtcpSenderInfo;
+
+typedef std::map<uint8, std::set<uint16> > MissingFramesAndPackets;
+
+// This object is only called from the main cast thread.
+// This class handles splitting encoded audio and video frames into packets
+// and adds an RTP header to each packet. The sent packets are stored until
+// they are acknowledged by the remote peer or timed out.
+class RtpSender {
+ public:
+ // At least one of |audio_config| / |video_config| must be non-NULL; audio
+ // takes precedence when both are supplied (see the constructor).
+ // |transport| is not owned and must outlive this object.
+ RtpSender(const AudioSenderConfig* audio_config,
+ const VideoSenderConfig* video_config,
+ PacedPacketSender* transport);
+
+ ~RtpSender();
+
+ // The video_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
+
+ // The audio_frame objects ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
+
+ // Re-sends stored packets; an empty packet set for a frame requests the
+ // whole frame.
+ void ResendPackets(const MissingFramesAndPackets& missing_packets);
+
+ // Fills |sender_info| with data for an RTCP sender report at time |now|.
+ void RtpStatistics(const base::TimeTicks& now, RtcpSenderInfo* sender_info);
+
+ // Used for testing.
+ void set_clock(base::TickClock* clock) {
+ // TODO(pwestin): review how we pass in a clock for testing.
+ clock_ = clock;
+ }
+
+ private:
+ // Rewrites the RTP sequence-number field of a stored packet before resend.
+ void UpdateSequenceNumber(std::vector<uint8>* packet);
+
+ RtpPacketizerConfig config_;
+ scoped_ptr<RtpPacketizer> packetizer_;
+ scoped_ptr<PacketStorage> storage_;
+ PacedPacketSender* transport_;  // Not owned.
+ scoped_ptr<base::TickClock> default_tick_clock_;
+ base::TickClock* clock_;  // Points at default_tick_clock_ unless overridden.
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
new file mode 100644
index 00000000000..93d3eb5c4aa
--- /dev/null
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
@@ -0,0 +1,68 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
+
+#include "base/logging.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+
+namespace media {
+namespace cast {
+
+// Allocates the libvpx decoder context and initializes it immediately.
+Vp8Decoder::Vp8Decoder(int number_of_cores) {
+ decoder_.reset(new vpx_dec_ctx_t());
+ InitDecode(number_of_cores);
+}
+
+Vp8Decoder::~Vp8Decoder() {}
+
+// Initializes the libvpx VP8 decoder with |number_of_cores| worker threads
+// and post-processing enabled.
+void Vp8Decoder::InitDecode(int number_of_cores) {
+  // Zero-initialize the config: vpx_codec_dec_init() may read fields other
+  // than |threads| (e.g. w/h), which were previously left uninitialized.
+  vpx_codec_dec_cfg_t cfg = {};
+  cfg.threads = number_of_cores;
+  vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
+
+  if (vpx_codec_dec_init(decoder_.get(), vpx_codec_vp8_dx(), &cfg, flags)) {
+    DCHECK(false) << "VP8 decode error";
+  }
+}
+
+// Decodes one VP8 frame into |decoded_frame|. Returns false on empty input,
+// decode failure, or when the codec produced no displayable image.
+bool Vp8Decoder::Decode(const EncodedVideoFrame& input_image,
+                        I420VideoFrame* decoded_frame) {
+  if (input_image.data.empty()) return false;
+
+  vpx_codec_iter_t iter = NULL;
+  vpx_image_t* img;
+  if (vpx_codec_decode(decoder_.get(),
+                       input_image.data.data(),
+                       input_image.data.size(),
+                       0,
+                       1 /* real time*/)) {
+    return false;
+  }
+
+  img = vpx_codec_get_frame(decoder_.get(), &iter);
+  if (img == NULL) return false;
+
+  // Populate the decoded image. The plane pointers reference memory owned
+  // by the codec and are only valid until the next decode call.
+  decoded_frame->width = img->d_w;
+  decoded_frame->height = img->d_h;
+
+  decoded_frame->y_plane.stride = img->stride[VPX_PLANE_Y];
+  decoded_frame->y_plane.length = img->stride[VPX_PLANE_Y] * img->d_h;
+  decoded_frame->y_plane.data = img->planes[VPX_PLANE_Y];
+
+  // The U and V planes of an I420 image are half the height of the Y plane
+  // (rounded up). The previous code used the full height, overstating the
+  // chroma plane sizes and risking out-of-bounds reads downstream.
+  const unsigned int uv_height = (img->d_h + 1) / 2;
+  decoded_frame->u_plane.stride = img->stride[VPX_PLANE_U];
+  decoded_frame->u_plane.length = img->stride[VPX_PLANE_U] * uv_height;
+  decoded_frame->u_plane.data = img->planes[VPX_PLANE_U];
+
+  decoded_frame->v_plane.stride = img->stride[VPX_PLANE_V];
+  decoded_frame->v_plane.length = img->stride[VPX_PLANE_V] * uv_height;
+  decoded_frame->v_plane.data = img->planes[VPX_PLANE_V];
+
+  return true;
+}
+
+} // namespace cast
+} // namespace media
+
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
new file mode 100644
index 00000000000..bed02c8454d
--- /dev/null
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
@@ -0,0 +1,26 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ # Static library wrapping libvpx VP8 decoding for the cast video
+ # receiver (vp8_decoder.h).
+ 'target_name': 'cast_vp8_decoder',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/libvpx/',
+ ],
+ 'sources': [
+ 'vp8_decoder.cc',
+ 'vp8_decoder.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
new file mode 100644
index 00000000000..1acdb5a3d30
--- /dev/null
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
+#define MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+
+typedef struct vpx_codec_ctx vpx_dec_ctx_t;
+
+namespace media {
+namespace cast {
+
+// Thin wrapper around the libvpx VP8 decoder.
+class Vp8Decoder {
+ public:
+ explicit Vp8Decoder(int number_of_cores);
+
+ ~Vp8Decoder();
+
+ // Initialize the decoder. Called from the constructor.
+ void InitDecode(int number_of_cores);
+
+ // Decode encoded image (as a part of a video stream). Returns false on
+ // empty input or decode failure; on success the plane pointers in
+ // |decoded_frame| reference codec-owned memory (see vp8_decoder.cc).
+ bool Decode(const EncodedVideoFrame& input_image,
+ I420VideoFrame* decoded_frame);
+
+ private:
+ scoped_ptr<vpx_dec_ctx_t> decoder_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
diff --git a/chromium/media/cast/video_receiver/video_decoder.cc b/chromium/media/cast/video_receiver/video_decoder.cc
new file mode 100644
index 00000000000..238d6db0aba
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_decoder.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_receiver/video_decoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
+
+namespace media {
+namespace cast {
+
+// Creates the codec-specific decoder; only VP8 is implemented, so
+// vp8_decoder_ stays NULL for every other codec.
+VideoDecoder::VideoDecoder(scoped_refptr<CastThread> cast_thread,
+                           const VideoReceiverConfig& video_config)
+    // Initializers listed in member declaration order (codec_, vp8_decoder_,
+    // cast_thread_) to match actual initialization order and silence
+    // -Wreorder.
+    : codec_(video_config.codec),
+      vp8_decoder_(),
+      cast_thread_(cast_thread) {
+  switch (video_config.codec) {
+    case kVp8:
+      // Initializing to use one core.
+      vp8_decoder_.reset(new Vp8Decoder(1));
+      break;
+    case kH264:
+      NOTIMPLEMENTED();
+      break;
+    case kExternalVideo:
+      DCHECK(false) << "Invalid codec";
+      break;
+  }
+}
+
+VideoDecoder::~VideoDecoder() {}
+
+// Decodes synchronously, then unconditionally posts |frame_release_callback|
+// to the main thread — the encoded frame is released whether or not decoding
+// succeeded.
+void VideoDecoder::DecodeVideoFrame(
+ const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_callback,
+ base::Closure frame_release_callback) {
+ DecodeFrame(encoded_frame, render_time, frame_decoded_callback);
+ // Done with the frame -> release.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
+}
+
+// Decodes one frame and, on success, posts the decoded frame to the main
+// thread via |frame_decoded_callback|. Failed or empty frames are dropped
+// silently.
+void VideoDecoder::DecodeFrame(
+    const EncodedVideoFrame* encoded_frame,
+    const base::TimeTicks render_time,
+    const VideoFrameDecodedCallback& frame_decoded_callback) {
+  DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
+  // The DCHECK compiles away in release builds; bail out explicitly rather
+  // than dereferencing a NULL decoder (only VP8 is implemented — see the
+  // constructor).
+  if (!vp8_decoder_) return;
+  // TODO(mikhal): Allow the application to allocate this memory.
+  scoped_ptr<I420VideoFrame> video_frame(new I420VideoFrame());
+
+  if (encoded_frame->data.size() > 0) {
+    bool success = vp8_decoder_->Decode(*encoded_frame, video_frame.get());
+    // Frame decoded - return frame to the user via callback.
+    if (success) {
+      cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+          base::Bind(frame_decoded_callback,
+                     base::Passed(&video_frame), render_time));
+    }
+  }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_decoder.h b/chromium/media/cast/video_receiver/video_decoder.h
new file mode 100644
index 00000000000..abf1955eb99
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_decoder.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
+#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/cast_thread.h"
+
+namespace media {
+namespace cast {
+
+class Vp8Decoder;
+
+// Decodes encoded video frames and delivers raw frames on the main thread.
+class VideoDecoder : public base::RefCountedThreadSafe<VideoDecoder>{
+ public:
+ VideoDecoder(scoped_refptr<CastThread> cast_thread,
+ const VideoReceiverConfig& video_config);
+ // NOTE(review): Chromium convention for RefCountedThreadSafe classes is a
+ // non-public destructor with a friend declaration; confirm the public
+ // destructor here is intentional.
+ ~VideoDecoder();
+
+
+ // Decode a video frame. Decoded (raw) frame will be returned in the
+ // frame_decoded_callback. |frame_release_callback| is always posted to the
+ // main thread afterwards (see video_decoder.cc).
+ void DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_callback,
+ base::Closure frame_release_callback);
+
+ private:
+ // Synchronous decode helper; invokes the callback only on success.
+ void DecodeFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_callback);
+ VideoCodec codec_;
+ scoped_ptr<Vp8Decoder> vp8_decoder_;  // NULL unless codec_ == kVp8.
+ scoped_refptr<CastThread> cast_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
diff --git a/chromium/media/cast/video_receiver/video_decoder_unittest.cc b/chromium/media/cast/video_receiver/video_decoder_unittest.cc
new file mode 100644
index 00000000000..0b95d128b75
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_decoder_unittest.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/video_receiver/video_decoder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+// Random frame size for testing.
+const int kFrameSize = 2345;
+
+// Frame-release callback used by the tests below.
+static void ReleaseFrame(const EncodedVideoFrame* encoded_frame) {
+ // Empty: the tests reuse the same fixture-owned frame, so there is
+ // nothing to free here.
+}
+
+// Counts decode-complete callbacks so tests can assert whether the decoder
+// delivered a frame.
+class TestVideoDecoderCallback :
+ public base::RefCountedThreadSafe<TestVideoDecoderCallback> {
+ public:
+ TestVideoDecoderCallback()
+ : num_called_(0) {}
+ // TODO(mikhal): Set and check expectations.
+ void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
+ const base::TimeTicks render_time) {
+ num_called_++;
+ }
+
+ int number_times_called() {return num_called_;}
+ private:
+ int num_called_;  // Number of completed decodes observed.
+};
+
+// Fixture: builds a VP8-configured VideoDecoder on a fake task runner so
+// posted callbacks run under test control.
+class VideoDecoderTest : public ::testing::Test {
+ protected:
+ VideoDecoderTest() {
+ // Configure to vp8.
+ config_.codec = kVp8;
+ config_.use_external_decoder = false;
+ video_decoder_callback_ = new TestVideoDecoderCallback();
+ }
+
+ ~VideoDecoderTest() {}
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ // All five cast threads share the same fake runner.
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ decoder_ = new VideoDecoder(cast_thread_, config_);
+ }
+
+ scoped_refptr<VideoDecoder> decoder_;
+ VideoReceiverConfig config_;
+ EncodedVideoFrame encoded_frame_;
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<TestVideoDecoderCallback> video_decoder_callback_;
+};
+
+// TODO(pwestin): Test decoding a real frame.
+// An empty (zero-byte) frame must be rejected without invoking the decoded
+// callback: Vp8Decoder::Decode returns false for empty input.
+TEST_F(VideoDecoderTest, SizeZero) {
+ encoded_frame_.codec = kVp8;
+ base::TimeTicks render_time;
+ VideoFrameDecodedCallback frame_decoded_callback =
+ base::Bind(&TestVideoDecoderCallback::DecodeComplete,
+ video_decoder_callback_.get());
+ decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
+ frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_));
+ EXPECT_EQ(0, video_decoder_callback_->number_times_called());
+}
+
+// A frame whose codec disagrees with the decoder's configured codec must
+// trip the DCHECK in VideoDecoder::DecodeFrame. Relies on DCHECK being
+// active, so this is meaningful in debug/death-test builds only.
+TEST_F(VideoDecoderTest, InvalidCodec) {
+ base::TimeTicks render_time;
+ VideoFrameDecodedCallback frame_decoded_callback =
+ base::Bind(&TestVideoDecoderCallback::DecodeComplete,
+ video_decoder_callback_.get());
+ encoded_frame_.data.assign(kFrameSize, 0);
+ encoded_frame_.codec = kExternalVideo;
+ EXPECT_DEATH(decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
+ frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_)),
+ "Invalid codec");
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_receiver.cc b/chromium/media/cast/video_receiver/video_receiver.cc
new file mode 100644
index 00000000000..4d0421cc6c0
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_receiver.cc
@@ -0,0 +1,337 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_receiver/video_receiver.h"
+
+#include <algorithm>
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/framer/framer.h"
+#include "media/cast/video_receiver/video_decoder.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;
+static const int64 kMaxFrameWaitMs = 20;
+static const int64 kMinTimeBetweenOffsetUpdatesMs = 500;
+static const int kTimeOffsetFilter = 8;
+
+// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
+// Used to pass payload data into the video receiver. Also samples pairs of
+// (local receive time, RTP timestamp) used by VideoReceiver::GetRenderTime
+// for sender/receiver clock-offset estimation.
+class LocalRtpVideoData : public RtpData {
+ public:
+  explicit LocalRtpVideoData(VideoReceiver* video_receiver)
+      : video_receiver_(video_receiver),
+        time_updated_(false),
+        incoming_rtp_timestamp_(0) {
+  }
+  ~LocalRtpVideoData() {}
+
+  // Records a fresh time/timestamp sample for the first packet, and then at
+  // most once per kMinTimeBetweenOffsetUpdatesMs, before forwarding the
+  // payload to the receiver.
+  virtual void OnReceivedPayloadData(const uint8* payload_data,
+                                     int payload_size,
+                                     const RtpCastHeader* rtp_header) OVERRIDE {
+    {
+      if (!time_updated_) {
+        incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
+        time_incoming_packet_ = video_receiver_->clock_->NowTicks();
+        time_updated_ = true;
+      } else if (video_receiver_->clock_->NowTicks() > time_incoming_packet_ +
+          base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
+        incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
+        time_incoming_packet_ = video_receiver_->clock_->NowTicks();
+        time_updated_ = true;
+      }
+    }
+    video_receiver_->IncomingRtpPacket(payload_data, payload_size, *rtp_header);
+  }
+
+  // Copies out the latest sample. One-shot: clears time_updated_ so each
+  // sample is consumed at most once. Returns whether a fresh sample existed.
+  bool GetPacketTimeInformation(base::TimeTicks* time_incoming_packet,
+                                uint32* incoming_rtp_timestamp) {
+    *time_incoming_packet = time_incoming_packet_;
+    *incoming_rtp_timestamp = incoming_rtp_timestamp_;
+    bool time_updated = time_updated_;
+    time_updated_ = false;
+    return time_updated;
+  }
+
+ private:
+  VideoReceiver* video_receiver_;  // Not owned.
+  bool time_updated_;
+  base::TimeTicks time_incoming_packet_;
+  uint32 incoming_rtp_timestamp_;
+};
+
+// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
+// Used to convey cast-specific feedback from receiver to sender.
+// Callback triggered by the Framer (cast message builder).
+class LocalRtpVideoFeedback : public RtpPayloadFeedback {
+ public:
+  explicit LocalRtpVideoFeedback(VideoReceiver* video_receiver)
+      : video_receiver_(video_receiver) {
+  }
+  // Forwards the cast ACK/NACK message to the owning receiver.
+  virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
+    video_receiver_->CastFeedback(cast_message);
+  }
+
+  // Forwards a key-frame request to the owning receiver.
+  virtual void RequestKeyFrame() OVERRIDE {
+    video_receiver_->RequestKeyFrame();
+  }
+
+ private:
+  VideoReceiver* video_receiver_;  // Not owned.
+};
+
+// Local implementation of RtpReceiverStatistics (defined by rtcp.h).
+// Used to pass statistics data from the RTP module to the RTCP module.
+class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
+ public:
+  explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver)
+      : rtp_receiver_(rtp_receiver) {
+  }
+
+  // Delegates directly to the RTP receiver's statistics getter.
+  virtual void GetStatistics(uint8* fraction_lost,
+                             uint32* cumulative_lost,  // 24 bits valid.
+                             uint32* extended_high_sequence_number,
+                             uint32* jitter) OVERRIDE {
+    rtp_receiver_->GetStatistics(fraction_lost,
+                                 cumulative_lost,
+                                 extended_high_sequence_number,
+                                 jitter);
+  }
+
+ private:
+  RtpReceiver* rtp_receiver_;  // Not owned.
+};
+
+
+// Wires together the framer, RTP receiver, RTCP instance and (optionally) the
+// software video decoder, then kicks off the periodic RTCP-report and
+// cast-message schedules.
+VideoReceiver::VideoReceiver(scoped_refptr<CastThread> cast_thread,
+                             const VideoReceiverConfig& video_config,
+                             PacedPacketSender* const packet_sender)
+    : cast_thread_(cast_thread),
+      codec_(video_config.codec),
+      incoming_ssrc_(video_config.incoming_ssrc),
+      default_tick_clock_(new base::DefaultTickClock()),
+      clock_(default_tick_clock_.get()),
+      incoming_payload_callback_(new LocalRtpVideoData(this)),
+      incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
+      rtp_receiver_(NULL, &video_config, incoming_payload_callback_.get()),
+      rtp_video_receiver_statistics_(
+          new LocalRtpReceiverStatistics(&rtp_receiver_)),
+      weak_factory_(this) {
+  target_delay_delta_ = base::TimeDelta::FromMilliseconds(
+      video_config.rtp_max_delay_ms);
+  // Number of frames the framer may hold un-acked; must be non-zero.
+  int max_unacked_frames = video_config.rtp_max_delay_ms *
+      video_config.max_frame_rate / 1000;
+  DCHECK(max_unacked_frames) << "Invalid argument";
+
+  framer_.reset(new Framer(incoming_payload_feedback_.get(),
+                           video_config.incoming_ssrc,
+                           video_config.decoder_faster_than_max_frame_rate,
+                           max_unacked_frames));
+  // Only instantiate the built-in decoder when the application does not
+  // supply its own (external) decoder.
+  if (!video_config.use_external_decoder) {
+    video_decoder_ = new VideoDecoder(cast_thread_, video_config);
+  }
+
+  rtcp_.reset(new Rtcp(NULL,
+      packet_sender,
+      NULL,
+      rtp_video_receiver_statistics_.get(),
+      video_config.rtcp_mode,
+      base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+      false,
+      video_config.feedback_ssrc,
+      video_config.rtcp_c_name));
+
+  rtcp_->SetRemoteSSRC(video_config.incoming_ssrc);
+  ScheduleNextRtcpReport();
+  ScheduleNextCastMessage();
+}
+
+VideoReceiver::~VideoReceiver() {}
+
+// Requests a decoded (raw) frame; |callback| fires when one is available.
+// Requires the built-in decoder (DCHECK), i.e. use_external_decoder == false.
+void VideoReceiver::GetRawVideoFrame(
+    const VideoFrameDecodedCallback& callback) {
+  DCHECK(video_decoder_);
+  scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+  base::TimeTicks render_time;
+  if (GetEncodedVideoFrame(encoded_frame.get(), &render_time)) {
+    base::Closure frame_release_callback =
+        base::Bind(&VideoReceiver::ReleaseFrame,
+            weak_factory_.GetWeakPtr(), encoded_frame->frame_id);
+    // Hand the ownership of the encoded frame to the decode thread.
+    cast_thread_->PostTask(CastThread::VIDEO_DECODER, FROM_HERE,
+        base::Bind(&VideoReceiver::DecodeVideoFrameThread,
+            weak_factory_.GetWeakPtr(), encoded_frame.release(),
+            render_time, callback, frame_release_callback));
+  }
+}
+
+// Utility function to run the decoder on a designated decoding thread.
+// Takes ownership of |encoded_frame| (raw pointer because it crossed a
+// PostTask boundary) and deletes it after the decode call.
+void VideoReceiver::DecodeVideoFrameThread(
+    const EncodedVideoFrame* encoded_frame,
+    const base::TimeTicks render_time,
+    const VideoFrameDecodedCallback& frame_decoded_callback,
+    base::Closure frame_release_callback) {
+  video_decoder_->DecodeVideoFrame(encoded_frame, render_time,
+      frame_decoded_callback, frame_release_callback);
+  // Release memory.
+  delete encoded_frame;
+}
+
+// Fetches the next complete encoded frame from the framer, if one is ready
+// to be handed out now. Returns false when no frame is available or when the
+// next frame's release time is still too far in the future.
+bool VideoReceiver::GetEncodedVideoFrame(EncodedVideoFrame* encoded_frame,
+                                         base::TimeTicks* render_time) {
+  DCHECK(encoded_frame);
+  DCHECK(render_time);
+
+  uint32 rtp_timestamp = 0;
+  bool next_frame = false;
+
+  base::TimeTicks timeout = clock_->NowTicks() +
+      base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+  if (!framer_->GetEncodedVideoFrame(timeout,
+                                     encoded_frame,
+                                     &rtp_timestamp,
+                                     &next_frame)) {
+    return false;
+  }
+  base::TimeTicks now = clock_->NowTicks();
+  *render_time = GetRenderTime(now, rtp_timestamp);
+
+  base::TimeDelta max_frame_wait_delta =
+      base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+  base::TimeDelta time_until_render = *render_time - now;
+  base::TimeDelta time_until_release = time_until_render - max_frame_wait_delta;
+  base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
+  if (!next_frame && (time_until_release > zero_delta)) {
+    // TODO(mikhal): When we return false here the caller may busy-spin
+    // retrying; decide whether the caller should sleep, or whether we should
+    // post a delayed task to ourselves instead (taking care not to get stuck).
+    return false;
+  }
+
+  // NOTE(review): with kDontShowTimeoutMs > 0 this condition looks always
+  // true (x < x + c); presumably the intent was to compare against
+  // -kDontShowTimeoutMs without the extra negation — confirm.
+  base::TimeDelta dont_show_timeout_delta = time_until_render -
+      base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
+  if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
+    // Mark the VP8 frame as not-for-display ("Don't show frame").
+    encoded_frame->data[0] &= 0xef;
+    VLOG(1) << "Don't show frame";
+  }
+
+  encoded_frame->codec = codec_;
+  return true;
+}
+
+// Maps an RTP timestamp to a local render time: sender capture time (via
+// RTCP mapping) + estimated sender/receiver clock offset + target delay.
+// Falls back to |now| (render immediately) when no RTCP mapping exists yet.
+base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
+                                             uint32 rtp_timestamp) {
+  // Senders time in ms when this frame was captured.
+  // Note: the senders clock and our local clock might not be synced.
+  base::TimeTicks rtp_timestamp_in_ticks;
+  base::TimeTicks time_incoming_packet;
+  uint32 incoming_rtp_timestamp;
+
+  // NOTE(review): the "was == 0" comment suggests this branch was meant to
+  // run when no offset has been estimated yet (time_offset_ == 0), but the
+  // condition as written is the opposite — confirm intent.
+  if (time_offset_.InMilliseconds()) {  // was == 0
+    incoming_payload_callback_->GetPacketTimeInformation(
+        &time_incoming_packet, &incoming_rtp_timestamp);
+
+    if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
+                                         incoming_rtp_timestamp,
+                                         &rtp_timestamp_in_ticks)) {
+      // We have not received any RTCP to sync the stream play it out as soon as
+      // possible.
+      return now;
+    }
+    time_offset_ = time_incoming_packet - rtp_timestamp_in_ticks;
+  } else if (incoming_payload_callback_->GetPacketTimeInformation(
+      &time_incoming_packet, &incoming_rtp_timestamp)) {
+    if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
+                                        incoming_rtp_timestamp,
+                                        &rtp_timestamp_in_ticks)) {
+      // Time to update the time_offset; exponential filter over
+      // kTimeOffsetFilter samples smooths out jitter.
+      base::TimeDelta time_offset =
+          time_incoming_packet - rtp_timestamp_in_ticks;
+      time_offset_ = ((kTimeOffsetFilter - 1) * time_offset_ + time_offset)
+          / kTimeOffsetFilter;
+    }
+  }
+  if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
+                                       rtp_timestamp,
+                                       &rtp_timestamp_in_ticks)) {
+    // This can fail if we have not received any RTCP packets in a long time.
+    return now;
+  }
+  return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
+}
+
+// Demultiplexes an incoming packet: RTCP packets go to the RTCP instance,
+// everything else is treated as RTP and handed to the RTP receiver.
+void VideoReceiver::IncomingPacket(const uint8* packet, int length) {
+  if (Rtcp::IsRtcpPacket(packet, length)) {
+    rtcp_->IncomingRtcpPacket(packet, length);
+    return;
+  }
+  rtp_receiver_.ReceivedPacket(packet, length);
+}
+
+// Inserts a parsed RTP payload into the framer (called via
+// LocalRtpVideoData::OnReceivedPayloadData).
+void VideoReceiver::IncomingRtpPacket(const uint8* payload_data,
+                                      int payload_size,
+                                      const RtpCastHeader& rtp_header) {
+  framer_->InsertPacket(payload_data, payload_size, rtp_header);
+}
+
+// Send a cast feedback message. Actual message created in the framer (cast
+// message builder). Records the send time for scheduling purposes.
+void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
+  rtcp_->SendRtcpCast(cast_message);
+  time_last_sent_cast_message_= clock_->NowTicks();
+}
+
+// Releases a consumed frame from the framer's frame map.
+void VideoReceiver::ReleaseFrame(uint8 frame_id) {
+  framer_->ReleaseFrame(frame_id);
+}
+
+// Send a key frame request (RTCP PLI) to the sender.
+void VideoReceiver::RequestKeyFrame() {
+  rtcp_->SendRtcpPli(incoming_ssrc_);
+}
+
+// Cast messages should be sent within a maximum interval. Schedule a call
+// if not triggered elsewhere, e.g. by the cast message_builder. The delay is
+// clamped to at least kMinSchedulingDelayMs to avoid busy re-posting.
+void VideoReceiver::ScheduleNextCastMessage() {
+  base::TimeTicks send_time;
+  framer_->TimeToSendNextCastMessage(&send_time);
+
+  base::TimeDelta time_to_send = send_time - clock_->NowTicks();
+  time_to_send = std::max(time_to_send,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoReceiver::SendNextCastMessage,
+                 weak_factory_.GetWeakPtr()), time_to_send);
+}
+
+// Timer callback: possibly sends a cast message, then re-arms the schedule.
+void VideoReceiver::SendNextCastMessage() {
+  framer_->SendCastMessage();  // Will only send a message if it is time.
+  ScheduleNextCastMessage();
+}
+
+// Schedule the next RTCP report to be sent back to the sender. The delay is
+// clamped to at least kMinSchedulingDelayMs to avoid busy re-posting.
+void VideoReceiver::ScheduleNextRtcpReport() {
+  base::TimeDelta time_to_next =
+      rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoReceiver::SendNextRtcpReport,
+                 weak_factory_.GetWeakPtr()), time_to_next);
+}
+
+// Timer callback: sends an RTCP receiver report, then re-arms the schedule.
+void VideoReceiver::SendNextRtcpReport() {
+  rtcp_->SendRtcpReport(incoming_ssrc_);
+  ScheduleNextRtcpReport();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_receiver.gypi b/chromium/media/cast/video_receiver/video_receiver.gypi
new file mode 100644
index 00000000000..bbee92e5ca2
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_receiver.gypi
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_video_receiver',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc',
+ ],
+ 'sources': [
+ 'video_decoder.h',
+ 'video_decoder.cc',
+ 'video_receiver.h',
+ 'video_receiver.cc',
+ ], # source
+ 'dependencies': [
+ 'framer/framer.gyp:cast_framer',
+ 'video_receiver/codecs/vp8/vp8_decoder.gyp:cast_vp8_decoder',
+ 'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
+ ],
+ },
+ ],
+}
+
+
diff --git a/chromium/media/cast/video_receiver/video_receiver.h b/chromium/media/cast/video_receiver/video_receiver.h
new file mode 100644
index 00000000000..40d0b0320a5
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_receiver.h
@@ -0,0 +1,124 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
+#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver.h"
+
+namespace media {
+namespace cast {
+
+class Framer;
+class LocalRtpVideoData;
+class LocalRtpVideoFeedback;
+class PacedPacketSender;
+class PeerVideoReceiver;
+class Rtcp;
+class RtpReceiverStatistics;
+class VideoDecoder;
+
+
+// Receiver side of a cast video stream: demuxes RTP/RTCP, reassembles frames
+// (Framer), optionally decodes (VideoDecoder), and sends RTCP feedback.
+// Should only be called from the Main cast thread.
+class VideoReceiver : public base::NonThreadSafe,
+    public base::SupportsWeakPtr<VideoReceiver> {
+ public:
+
+  VideoReceiver(scoped_refptr<CastThread> cast_thread,
+                const VideoReceiverConfig& video_config,
+                PacedPacketSender* const packet_sender);
+
+  virtual ~VideoReceiver();
+
+  // Request a raw frame. Will return frame via callback when available.
+  void GetRawVideoFrame(const VideoFrameDecodedCallback& callback);
+
+  // Request an encoded frame. Memory allocated by application.
+  bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
+                            base::TimeTicks* render_time);
+
+  // Insert a RTP packet to the video receiver.
+  void IncomingPacket(const uint8* packet, int length);
+
+  // Release frame - should be called following a GetEncodedVideoFrame call.
+  // Removes frame from the frame map in the framer.
+  void ReleaseFrame(uint8 frame_id);
+
+  // Replaces the default tick clock (testing only).
+  void set_clock(base::TickClock* clock) {
+    clock_ = clock;
+    rtcp_->set_clock(clock);
+  }
+ protected:
+  // Called (indirectly) by the RTP receiver for each parsed payload.
+  void IncomingRtpPacket(const uint8* payload_data,
+                         int payload_size,
+                         const RtpCastHeader& rtp_header);
+
+  // Runs the decoder on the VIDEO_DECODER thread; takes ownership of
+  // |encoded_frame|.
+  void DecodeVideoFrameThread(
+      const EncodedVideoFrame* encoded_frame,
+      const base::TimeTicks render_time,
+      const VideoFrameDecodedCallback& frame_decoded_callback,
+      base::Closure frame_release_callback);
+
+ private:
+  friend class LocalRtpVideoData;
+  friend class LocalRtpVideoFeedback;
+
+  void CastFeedback(const RtcpCastMessage& cast_message);
+  void RequestKeyFrame();
+
+  // Returns Render time based on current time and the rtp timestamp.
+  base::TimeTicks GetRenderTime(base::TimeTicks now, uint32 rtp_timestamp);
+
+  // Schedule timing for the next cast message.
+  void ScheduleNextCastMessage();
+
+  // Schedule timing for the next RTCP report.
+  void ScheduleNextRtcpReport();
+  // Actually send the next cast message.
+  void SendNextCastMessage();
+  // Actually send the next RTCP report.
+  void SendNextRtcpReport();
+
+  scoped_refptr<VideoDecoder> video_decoder_;
+  scoped_refptr<CastThread> cast_thread_;
+  scoped_ptr<Framer> framer_;
+  const VideoCodec codec_;
+  const uint32 incoming_ssrc_;
+  base::TimeDelta target_delay_delta_;
+  scoped_ptr<LocalRtpVideoData> incoming_payload_callback_;
+  scoped_ptr<LocalRtpVideoFeedback> incoming_payload_feedback_;
+  RtpReceiver rtp_receiver_;
+  scoped_ptr<Rtcp> rtcp_;
+  scoped_ptr<RtpReceiverStatistics> rtp_video_receiver_statistics_;
+  base::TimeTicks time_last_sent_cast_message_;
+  // Sender-receiver offset estimation.
+  base::TimeDelta time_offset_;
+
+  scoped_ptr<base::TickClock> default_tick_clock_;
+  base::TickClock* clock_;  // Points at default_tick_clock_ unless overridden.
+
+  base::WeakPtrFactory<VideoReceiver> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(VideoReceiver);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
+
diff --git a/chromium/media/cast/video_receiver/video_receiver_unittest.cc b/chromium/media/cast/video_receiver/video_receiver_unittest.cc
new file mode 100644
index 00000000000..b1b1c0b599f
--- /dev/null
+++ b/chromium/media/cast/video_receiver/video_receiver_unittest.cc
@@ -0,0 +1,138 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/video_receiver/video_receiver.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+static const int kPacketSize = 1500;
+static const int64 kStartMillisecond = 123456789;
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+// Note: previously plain ref-counted; now ref-counted thread-safe.
+// Counts how many times the decode-complete callback fires.
+class TestVideoReceiverCallback :
+    public base::RefCountedThreadSafe<TestVideoReceiverCallback> {
+ public:
+  TestVideoReceiverCallback()
+      :num_called_(0) {}
+  // TODO(mikhal): Set and check expectations.
+  void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
+                      const base::TimeTicks render_time) {
+    ++num_called_;
+  }
+  int number_times_called() { return num_called_;}
+ private:
+  int num_called_;
+};
+
+// Test subclass that exposes the protected IncomingRtpPacket() entry point.
+class PeerVideoReceiver : public VideoReceiver {
+ public:
+  PeerVideoReceiver(scoped_refptr<CastThread> cast_thread,
+                    const VideoReceiverConfig& video_config,
+                    PacedPacketSender* const packet_sender)
+      : VideoReceiver(cast_thread, video_config, packet_sender) {
+  }
+  using VideoReceiver::IncomingRtpPacket;
+};
+
+
+// Fixture: builds a VideoReceiver on a fake task runner with a mock packet
+// sender and a test tick clock, and prepares a single-packet key-frame RTP
+// header for each test.
+class VideoReceiverTest : public ::testing::Test {
+ protected:
+  VideoReceiverTest() {
+    // Configure to use vp8 software implementation.
+    config_.codec = kVp8;
+    config_.use_external_decoder = false;
+    task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+    cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+                                  task_runner_, task_runner_);
+    receiver_.reset(new
+        PeerVideoReceiver(cast_thread_, config_, &mock_transport_));
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kStartMillisecond));
+    video_receiver_callback_ = new TestVideoReceiverCallback();
+    receiver_->set_clock(&testing_clock_);
+  }
+
+  ~VideoReceiverTest() {}
+
+  virtual void SetUp() {
+    payload_.assign(kPacketSize, 0);
+
+    // Always start with a key frame.
+    rtp_header_.is_key_frame = true;
+    rtp_header_.frame_id = 0;
+    rtp_header_.packet_id = 0;
+    rtp_header_.max_packet_id = 0;
+    rtp_header_.is_reference = false;
+    rtp_header_.reference_frame_id = 0;
+  }
+
+  MockPacedPacketSender mock_transport_;
+  VideoReceiverConfig config_;
+  scoped_ptr<PeerVideoReceiver> receiver_;
+  std::vector<uint8> payload_;
+  RtpCastHeader rtp_header_;
+  base::SimpleTestTickClock testing_clock_;
+
+  scoped_refptr<test::FakeTaskRunner> task_runner_;
+  scoped_refptr<CastThread> cast_thread_;
+  scoped_refptr<TestVideoReceiverCallback> video_receiver_callback_;
+};
+
+// A single-packet key frame should be retrievable as a complete encoded
+// VP8 frame.
+TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) {
+  EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
+      testing::Return(true));
+  receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+  EncodedVideoFrame video_frame;
+  base::TimeTicks render_time;
+  EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
+  EXPECT_TRUE(video_frame.key_frame);
+  EXPECT_EQ(kVp8, video_frame.codec);
+  task_runner_->RunTasks();
+}
+
+// A frame split over three packets should be reassembled and retrievable
+// once all packets have arrived.
+TEST_F(VideoReceiverTest, MultiplePackets) {
+  EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
+      testing::Return(true));
+  rtp_header_.max_packet_id = 2;
+  receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+  ++rtp_header_.packet_id;
+  ++rtp_header_.webrtc.header.sequenceNumber;
+  receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+  ++rtp_header_.packet_id;
+  receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+  EncodedVideoFrame video_frame;
+  base::TimeTicks render_time;
+  EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
+  task_runner_->RunTasks();
+}
+
+// TODO(pwestin): add encoded frames.
+// Exercises the raw-frame (decode) path end to end on the fake task runner.
+TEST_F(VideoReceiverTest, GetOnePacketRawframe) {
+  EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
+      testing::Return(true));
+  receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+  // Decode error - requires legal input.
+  VideoFrameDecodedCallback frame_decoded_callback =
+      base::Bind(&TestVideoReceiverCallback::DecodeComplete,
+                 video_receiver_callback_);
+  receiver_->GetRawVideoFrame(frame_decoded_callback);
+  task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
+
+
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
new file mode 100644
index 00000000000..eaf6fbd714b
--- /dev/null
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -0,0 +1,352 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO (pwestin): add a link to the design document describing the generic
+// protocol and the VP8 specific details.
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
+
+namespace media {
+namespace cast {
+
+// Floor (in % of per-frame bandwidth) for the key-frame size target.
+static const uint32 kMinIntra = 300;
+
+// Sets up encoder state (buffer bookkeeping, libvpx config/context/image
+// wrapper) and initializes the codec.
+Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
+                       uint8 max_unacked_frames)
+    : cast_config_(video_config),
+      use_multiple_video_buffers_(
+          cast_config_.max_number_of_video_buffers_used ==
+          kNumberOfVp8VideoBuffers),
+      max_number_of_repeated_buffers_in_a_row_(
+          (max_unacked_frames > kNumberOfVp8VideoBuffers) ?
+          ((max_unacked_frames - 1) / kNumberOfVp8VideoBuffers) : 0),
+      config_(new vpx_codec_enc_cfg_t()),
+      encoder_(new vpx_codec_ctx_t()),
+      // Creating a wrapper to the image - setting image data to NULL. Actual
+      // pointer will be set during encode. Setting align to 1, as it is
+      // meaningless (actual memory is not allocated).
+      raw_image_(vpx_img_wrap(NULL, IMG_FMT_I420, video_config.width,
+                              video_config.height, 1, NULL)),
+      key_frame_requested_(true),
+      timestamp_(0),
+      last_encoded_frame_id_(kStartFrameId),
+      number_of_repeated_buffers_(0) {
+  // VP8 have 3 buffers available for prediction, with
+  // max_number_of_video_buffers_used set to 1 we maximize the coding efficiency
+  // however in this mode we can not skip frames in the receiver to catch up
+  // after a temporary network outage; with max_number_of_video_buffers_used
+  // set to 3 we allow 2 frames to be skipped by the receiver without error
+  // propagation.
+  DCHECK(cast_config_.max_number_of_video_buffers_used == 1 ||
+         cast_config_.max_number_of_video_buffers_used ==
+         kNumberOfVp8VideoBuffers) << "Invalid argument";
+
+  // All buffers start out acked and pointing at the start frame id.
+  for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+    acked_frame_buffers_[i] = true;
+    used_buffers_frame_id_[i] = kStartFrameId;
+  }
+  InitEncode(video_config.number_of_cores);
+}
+
+// Releases the libvpx codec context and the wrapped image descriptor.
+Vp8Encoder::~Vp8Encoder() {
+  vpx_codec_destroy(encoder_);
+  vpx_img_free(raw_image_);
+}
+
+// Configures and initializes the libvpx encoder for real-time CBR encoding.
+void Vp8Encoder::InitEncode(int number_of_cores) {
+  // Populate encoder configuration with default values.
+  if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_.get(), 0)) {
+    DCHECK(false) << "Invalid return value";
+  }
+  config_->g_w = cast_config_.width;
+  config_->g_h = cast_config_.height;
+  config_->rc_target_bitrate = cast_config_.start_bitrate / 1000;  // In kbit/s.
+
+  // Setting the codec time base.
+  config_->g_timebase.num = 1;
+  config_->g_timebase.den = kVideoFrequency;
+  config_->g_lag_in_frames = 0;  // No lookahead; we need low latency.
+  config_->kf_mode = VPX_KF_DISABLED;  // Key frames are requested explicitly.
+  if (use_multiple_video_buffers_) {
+    // We must enable error resilience when we use multiple buffers, due to
+    // codec requirements.
+    config_->g_error_resilient = 1;
+  }
+
+  if (cast_config_.width * cast_config_.height > 640 * 480
+      && number_of_cores >= 2) {
+    config_->g_threads = 2;  // 2 threads for qHD/HD.
+  } else {
+    config_->g_threads = 1;  // 1 thread for VGA or less.
+  }
+
+  // Rate control settings.
+  // TODO(pwestin): revisit these constants. Currently identical to webrtc.
+  config_->rc_dropframe_thresh = 30;
+  config_->rc_end_usage = VPX_CBR;
+  config_->g_pass = VPX_RC_ONE_PASS;
+  config_->rc_resize_allowed = 0;
+  config_->rc_min_quantizer = cast_config_.min_qp;
+  config_->rc_max_quantizer = cast_config_.max_qp;
+  config_->rc_undershoot_pct = 100;
+  config_->rc_overshoot_pct = 15;
+  config_->rc_buf_initial_sz = 500;
+  config_->rc_buf_optimal_sz = 600;
+  config_->rc_buf_sz = 1000;
+
+  // set the maximum target size of any key-frame.
+  uint32 rc_max_intra_target = MaxIntraTarget(config_->rc_buf_optimal_sz);
+  vpx_codec_flags_t flags = 0;
+  // TODO(mikhal): Tune settings.
+  if (vpx_codec_enc_init(encoder_, vpx_codec_vp8_cx(), config_.get(), flags)) {
+    DCHECK(false) << "Invalid return value";
+  }
+  vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+  vpx_codec_control(encoder_, VP8E_SET_NOISE_SENSITIVITY, 0);
+  vpx_codec_control(encoder_, VP8E_SET_CPUUSED, -6);
+  vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+                    rc_max_intra_target);
+}
+
+// Encodes one raw I420 frame into |encoded_image|. Returns false on encoder
+// error; returns true with an empty payload (and no frame-id update) when
+// the encoder produced no output for this frame.
+bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
+                        EncodedVideoFrame* encoded_image) {
+  // Image in vpx_image_t format.
+  // Input image is const. VP8's raw image is not defined as const.
+  raw_image_->planes[PLANE_Y] = const_cast<uint8*>(input_image.y_plane.data);
+  raw_image_->planes[PLANE_U] = const_cast<uint8*>(input_image.u_plane.data);
+  raw_image_->planes[PLANE_V] = const_cast<uint8*>(input_image.v_plane.data);
+
+  raw_image_->stride[VPX_PLANE_Y] = input_image.y_plane.stride;
+  raw_image_->stride[VPX_PLANE_U] = input_image.u_plane.stride;
+  raw_image_->stride[VPX_PLANE_V] = input_image.v_plane.stride;
+
+  uint8 latest_frame_id_to_reference;
+  Vp8Buffers buffer_to_update;
+  vpx_codec_flags_t flags = 0;
+  if (key_frame_requested_) {
+    flags = VPX_EFLAG_FORCE_KF;
+    // Self reference.
+    latest_frame_id_to_reference =
+        static_cast<uint8>(last_encoded_frame_id_ + 1);
+    // We can pick any buffer as buffer_to_update since we update
+    // them all.
+    buffer_to_update = kLastBuffer;
+  } else {
+    // Reference all acked frames (buffers).
+    latest_frame_id_to_reference = GetLatestFrameIdToReference();
+    GetCodecReferenceFlags(&flags);
+    buffer_to_update = GetNextBufferToUpdate();
+    GetCodecUpdateFlags(buffer_to_update, &flags);
+  }
+
+  // Note: The duration does not reflect the real time between frames. This is
+  // done to keep the encoder happy.
+  uint32 duration = kVideoFrequency / cast_config_.max_frame_rate;
+  if (vpx_codec_encode(encoder_, raw_image_, timestamp_, duration, flags,
+                       VPX_DL_REALTIME)) {
+    return false;
+  }
+  timestamp_ += duration;
+
+  // Get encoded frame: concatenate all frame packets into the output buffer.
+  const vpx_codec_cx_pkt_t *pkt = NULL;
+  vpx_codec_iter_t iter = NULL;
+  int total_size = 0;
+  while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
+    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+      total_size += pkt->data.frame.sz;
+      encoded_image->data.reserve(total_size);
+      encoded_image->data.insert(
+          encoded_image->data.end(),
+          static_cast<const uint8*>(pkt->data.frame.buf),
+          static_cast<const uint8*>(pkt->data.frame.buf) +
+              pkt->data.frame.sz);
+      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+        encoded_image->key_frame = true;
+      } else {
+        encoded_image->key_frame = false;
+      }
+    }
+  }
+  // Don't update frame_id for zero size frames.
+  if (total_size == 0) return true;
+
+  // Populate the encoded frame.
+  encoded_image->codec = kVp8;
+  encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
+  encoded_image->frame_id = ++last_encoded_frame_id_;
+
+  if (encoded_image->key_frame) {
+    key_frame_requested_ = false;
+
+    // A key frame refreshes every buffer.
+    for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+      used_buffers_frame_id_[i] = encoded_image->frame_id;
+    }
+    // We can pick any buffer as last_used_vp8_buffer_ since we update
+    // them all.
+    last_used_vp8_buffer_ = buffer_to_update;
+  } else {
+    if (buffer_to_update != kNoBuffer) {
+      // The updated buffer is un-acked until the receiver reports it.
+      acked_frame_buffers_[buffer_to_update] = false;
+      used_buffers_frame_id_[buffer_to_update] = encoded_image->frame_id;
+      last_used_vp8_buffer_ = buffer_to_update;
+    }
+  }
+  return true;
+}
+
+// Forbids referencing any buffer whose frame has not been acked yet.
+// No-op in single-buffer mode.
+void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
+  if (!use_multiple_video_buffers_) return;
+
+  // We need to reference something.
+  DCHECK(acked_frame_buffers_[kAltRefBuffer] ||
+         acked_frame_buffers_[kGoldenBuffer] ||
+         acked_frame_buffers_[kLastBuffer]) << "Invalid state";
+
+  if (!acked_frame_buffers_[kAltRefBuffer]) {
+    *flags |= VP8_EFLAG_NO_REF_ARF;
+  }
+  if (!acked_frame_buffers_[kGoldenBuffer]) {
+    *flags |= VP8_EFLAG_NO_REF_GF;
+  }
+  if (!acked_frame_buffers_[kLastBuffer]) {
+    *flags |= VP8_EFLAG_NO_REF_LAST;
+  }
+}
+
+// Returns the newest frame id (wrap-aware via IsNewerFrameId) held by any
+// acked buffer; in single-buffer mode simply the last encoded frame id.
+uint8 Vp8Encoder::GetLatestFrameIdToReference() {
+  if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
+
+  int latest_frame_id_to_reference = -1;  // -1 == no candidate yet.
+  if (acked_frame_buffers_[kAltRefBuffer]) {
+    latest_frame_id_to_reference = used_buffers_frame_id_[kAltRefBuffer];
+  }
+  if (acked_frame_buffers_[kGoldenBuffer]) {
+    if (latest_frame_id_to_reference == -1) {
+      latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+    } else {
+      if (IsNewerFrameId(used_buffers_frame_id_[kGoldenBuffer],
+                         latest_frame_id_to_reference)) {
+        latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+      }
+    }
+  }
+  if (acked_frame_buffers_[kLastBuffer]) {
+    if (latest_frame_id_to_reference == -1) {
+      latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+    } else {
+      if (IsNewerFrameId(used_buffers_frame_id_[kLastBuffer],
+                         latest_frame_id_to_reference)) {
+        latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+      }
+    }
+  }
+  DCHECK(latest_frame_id_to_reference != -1) << "Invalid state";
+  return static_cast<uint8>(latest_frame_id_to_reference);
+}
+
+// Chooses which VP8 reference buffer the next delta frame should update,
+// rotating ALT->LAST->GOLDEN->ALT, but repeating kNoBuffer up to
+// max_number_of_repeated_buffers_in_a_row_ times between rotations.
+Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
+  // Update at most one buffer, except for key-frames.
+
+  Vp8Buffers buffer_to_update;
+  if (number_of_repeated_buffers_ < max_number_of_repeated_buffers_in_a_row_) {
+    // TODO(pwestin): experiment with this. The issue with only this change is
+    // that we can end up with only 4 frames in flight when we expect 6.
+    // buffer_to_update = last_used_vp8_buffer_;
+    buffer_to_update = kNoBuffer;
+    ++number_of_repeated_buffers_;
+  } else {
+    number_of_repeated_buffers_ = 0;
+    switch (last_used_vp8_buffer_) {
+      case kAltRefBuffer:
+        buffer_to_update = kLastBuffer;
+        break;
+      case kLastBuffer:
+        buffer_to_update = kGoldenBuffer;
+        break;
+      case kGoldenBuffer:
+        buffer_to_update = kAltRefBuffer;
+        break;
+      case kNoBuffer:
+        DCHECK(false) << "Invalid state";
+        break;
+    }
+  }
+  return buffer_to_update;
+}
+
+// Translates the chosen buffer into VP8 "do not update" flags so the encoder
+// refreshes only that buffer (or none). No-op in single-buffer mode.
+void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+                                     vpx_codec_flags_t* flags) {
+  if (!use_multiple_video_buffers_) return;
+
+  // Update at most one buffer, except for key-frames.
+  switch (buffer_to_update) {
+    case kAltRefBuffer:
+      *flags |= VP8_EFLAG_NO_UPD_GF;
+      *flags |= VP8_EFLAG_NO_UPD_LAST;
+      break;
+    case kLastBuffer:
+      *flags |= VP8_EFLAG_NO_UPD_GF;
+      *flags |= VP8_EFLAG_NO_UPD_ARF;
+      break;
+    case kGoldenBuffer:
+      *flags |= VP8_EFLAG_NO_UPD_ARF;
+      *flags |= VP8_EFLAG_NO_UPD_LAST;
+      break;
+    case kNoBuffer:
+      *flags |= VP8_EFLAG_NO_UPD_ARF;
+      *flags |= VP8_EFLAG_NO_UPD_GF;
+      *flags |= VP8_EFLAG_NO_UPD_LAST;
+      *flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+      break;
+  }
+}
+
+// Applies a new target bit rate (|new_bitrate| in bit/s) to the live encoder.
+void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
+  config_->rc_target_bitrate = new_bitrate / 1000;  // In kbit/s.
+  // Update encoder context.
+  if (vpx_codec_enc_config_set(encoder_, config_.get())) {
+    DCHECK(false) << "Invalid return value";
+  }
+}
+
+// Marks every buffer holding |frame_id| as acked (receiver confirmed it),
+// making it eligible as a prediction reference again.
+void Vp8Encoder::LatestFrameIdToReference(uint8 frame_id) {
+  if (!use_multiple_video_buffers_) return;
+
+  for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+    if (frame_id == used_buffers_frame_id_[i]) {
+      acked_frame_buffers_[i] = true;
+    }
+  }
+}
+
+// Forces the next call to Encode() to produce a key frame.
+void Vp8Encoder::GenerateKeyFrame() {
+  key_frame_requested_ = true;
+}
+
+// Calculate the max size of the key frame relative to a normal delta frame.
+uint32 Vp8Encoder::MaxIntraTarget(uint32 optimal_buffer_size_ms) const {
+  // Set max to the optimal buffer level (normalized by target BR),
+  // and scaled by a scale_parameter.
+  // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
+  // This values is presented in percentage of perFrameBw:
+  // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
+  // The target in % is as follows:
+
+  float scale_parameter = 0.5;
+  uint32 target_pct = optimal_buffer_size_ms * scale_parameter *
+      cast_config_.max_frame_rate / 10;
+
+  // Don't go below 3 times the per frame bandwidth.
+  return std::max(target_pct, kMinIntra);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
new file mode 100644
index 00000000000..0b12789aa05
--- /dev/null
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
@@ -0,0 +1,19 @@
+{
+  'targets': [
+    {
+      'target_name': 'cast_vp8_encoder',  # Consumed by video_sender.gypi's 'dependencies'.
+      'type': 'static_library',
+      'include_dirs': [
+         '<(DEPTH)/',
+         '<(DEPTH)/third_party/',
+      ],
+      'sources': [
+        'vp8_encoder.cc',
+        'vp8_encoder.h',
+      ], # source
+      'dependencies': [
+        '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',  # Wraps the libvpx VP8 encoder.
+      ],
+    },
+  ],
+}
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
new file mode 100644
index 00000000000..3b041a01d26
--- /dev/null
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+
+// VPX forward declaration.
+typedef struct vpx_codec_ctx vpx_enc_ctx_t;
+
+namespace media {
+namespace cast {
+
+const int kNumberOfVp8VideoBuffers = 3;  // alt-ref, golden and last (see Vp8Buffers below).
+
+class Vp8Encoder {  // libvpx-backed VP8 encoder for the Cast video sender.
+ public:
+  Vp8Encoder(const VideoSenderConfig& video_config,
+             uint8 max_unacked_frames);
+
+  ~Vp8Encoder();
+
+  // Encode a raw image (as a part of a video stream).
+  bool Encode(const I420VideoFrame& input_image,
+              EncodedVideoFrame* encoded_image);
+
+  // Update the encoder with a new target bit rate.
+  void UpdateRates(uint32 new_bitrate);
+
+  // Set the next frame to be a key frame.
+  void GenerateKeyFrame();
+
+  // Mark the buffer holding |frame_id| as acked by the receiver.
+  void LatestFrameIdToReference(uint8 frame_id);
+
+ private:
+  enum Vp8Buffers {
+    kAltRefBuffer = 0,
+    kGoldenBuffer = 1,
+    kLastBuffer = 2,
+    kNoBuffer = 3  // Note: must be last.
+  };
+
+  void InitEncode(int number_of_cores);
+
+  // Calculate the max target in % for a keyframe.
+  uint32 MaxIntraTarget(uint32 optimal_buffer_size) const;
+
+  // Calculate which next Vp8 buffers to update with the next frame.
+  Vp8Buffers GetNextBufferToUpdate();
+
+  // Calculate which previous frame to reference.
+  uint8_t GetLatestFrameIdToReference();
+
+  // Get encoder flags for our referenced encoder buffers.
+  void GetCodecReferenceFlags(vpx_codec_flags_t* flags);
+
+  // Get encoder flags for our encoder buffers to update with next frame.
+  void GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+                           vpx_codec_flags_t* flags);
+
+  const VideoSenderConfig cast_config_;
+  const bool use_multiple_video_buffers_;
+  const int max_number_of_repeated_buffers_in_a_row_;
+
+  // VP8 internal objects.
+  scoped_ptr<vpx_codec_enc_cfg_t> config_;
+  vpx_enc_ctx_t* encoder_;  // Raw libvpx handles -- TODO(review): confirm teardown in ~Vp8Encoder().
+  vpx_image_t* raw_image_;
+
+  bool key_frame_requested_;
+  int64 timestamp_;  // Presumably the libvpx pts -- confirm units in vp8_encoder.cc.
+  uint8 last_encoded_frame_id_;
+  uint8 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];  // Frame id stored in each VP8 buffer.
+  bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];  // True once that buffer's frame was acked.
+  Vp8Buffers last_used_vp8_buffer_;
+  int number_of_repeated_buffers_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
diff --git a/chromium/media/cast/video_sender/mock_video_encoder_controller.h b/chromium/media/cast/video_sender/mock_video_encoder_controller.h
new file mode 100644
index 00000000000..90b2abdf3bc
--- /dev/null
+++ b/chromium/media/cast/video_sender/mock_video_encoder_controller.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+#define MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
+#include "media/cast/cast_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockVideoEncoderController : public VideoEncoderController {  // GMock mock for unit tests.
+ public:
+  MOCK_METHOD1(SetBitRate, void(int new_bit_rate));
+
+  MOCK_METHOD1(SkipNextFrame, void(bool skip_next_frame));
+
+  MOCK_METHOD0(GenerateKeyFrame, void());
+
+  MOCK_METHOD1(LatestFrameIdToReference, void(uint8 frame_id));
+
+  MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
diff --git a/chromium/media/cast/video_sender/video_encoder.cc b/chromium/media/cast/video_sender/video_encoder.cc
new file mode 100644
index 00000000000..94a296c1bc0
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+VideoEncoder::VideoEncoder(scoped_refptr<CastThread> cast_thread,
+                           const VideoSenderConfig& video_config,
+                           uint8 max_unacked_frames)
+    : video_config_(video_config),
+      cast_thread_(cast_thread),
+      skip_next_frame_(false),
+      skip_count_(0) {
+  if (video_config.codec == kVp8) {  // Only VP8 software encoding is implemented.
+    vp8_encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
+  } else {
+    DCHECK(false) << "Invalid config";  // Codec not supported.
+  }
+
+  dynamic_config_.key_frame_requested = false;  // No key frame pending at start.
+  dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
+  dynamic_config_.bit_rate = video_config.start_bitrate;  // Begin at the configured start bitrate.
+}
+
+VideoEncoder::~VideoEncoder() {}  // vp8_encoder_ is released by its scoped_ptr.
+
+bool VideoEncoder::EncodeVideoFrame(
+    const I420VideoFrame* video_frame,
+    const base::TimeTicks& capture_time,
+    const FrameEncodedCallback& frame_encoded_callback,
+    const base::Closure frame_release_callback) {  // Runs when the encoder is done with the frame.
+  if (video_config_.codec != kVp8) return false;  // Only VP8 is supported.
+
+  if (skip_next_frame_) {
+    ++skip_count_;
+    VLOG(1) << "Skip encoding frame";
+    return false;  // Frame dropped; release callback is never posted in this case.
+  }
+
+  cast_thread_->PostTask(CastThread::VIDEO_ENCODER, FROM_HERE,
+      base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
+          video_frame, capture_time, dynamic_config_, frame_encoded_callback,
+          frame_release_callback));  // |video_frame| must stay valid until the release callback.
+
+  dynamic_config_.key_frame_requested = false;  // Request consumed by the task posted above.
+  return true;
+}
+
+void VideoEncoder::EncodeVideoFrameEncoderThread(
+    const I420VideoFrame* video_frame,
+    const base::TimeTicks& capture_time,
+    const CodecDynamicConfig& dynamic_config,
+    const FrameEncodedCallback& frame_encoded_callback,
+    const base::Closure frame_release_callback) {  // Runs on the video encoder thread.
+  if (dynamic_config.key_frame_requested) {  // Apply state snapshotted on the main thread.
+    vp8_encoder_->GenerateKeyFrame();
+  }
+  vp8_encoder_->LatestFrameIdToReference(
+      dynamic_config.latest_frame_id_to_reference);
+  vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
+
+  scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+  bool retval = vp8_encoder_->Encode(*video_frame, encoded_frame.get());
+
+  // We are done with the video frame; release it.
+  cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
+
+  if (!retval) {
+    VLOG(1) << "Encoding failed";
+    return;
+  }
+  if (encoded_frame->data.empty()) {  // size() is unsigned; "<= 0" was a confusing no-op.
+    VLOG(1) << "Encoding resulted in an empty frame";
+    return;
+  }
+  cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(frame_encoded_callback,
+                 base::Passed(&encoded_frame), capture_time));
+}
+
+// Inform the encoder about the new target bit rate.
+void VideoEncoder::SetBitRate(int new_bit_rate) {  // OVERRIDE removed: virt-specifiers are invalid on out-of-class definitions.
+  dynamic_config_.bit_rate = new_bit_rate;
+}
+
+// Inform the encoder to not encode the next frame.
+void VideoEncoder::SkipNextFrame(bool skip_next_frame) {
+  skip_next_frame_ = skip_next_frame;
+}
+
+// Inform the encoder to encode the next frame as a key frame.
+void VideoEncoder::GenerateKeyFrame() {
+  dynamic_config_.key_frame_requested = true;
+}
+
+// Inform the encoder to only reference frames older or equal to frame_id;
+void VideoEncoder::LatestFrameIdToReference(uint8 frame_id) {
+  dynamic_config_.latest_frame_id_to_reference = frame_id;
+}
+
+int VideoEncoder::NumberOfSkippedFrames() const {
+  return skip_count_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/video_encoder.h b/chromium/media/cast/video_sender/video_encoder.h
new file mode 100644
index 00000000000..d3b261e1033
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder.h
@@ -0,0 +1,80 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+namespace media {
+namespace cast {
+
+// This object is called external from the main cast thread and internally from
+// the video encoder thread.
+class VideoEncoder : public VideoEncoderController,
+                     public base::RefCountedThreadSafe<VideoEncoder> {
+ public:
+  typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
+      const base::TimeTicks&)> FrameEncodedCallback;
+
+  VideoEncoder(scoped_refptr<CastThread> cast_thread,
+               const VideoSenderConfig& video_config,
+               uint8 max_unacked_frames);
+
+  virtual ~VideoEncoder();  // NOTE(review): public virtual dtor on a RefCountedThreadSafe type -- Chromium style usually makes it private; confirm.
+
+  // Called from the main cast thread. This function post the encode task to the
+  // video encoder thread;
+  // The video_frame must be valid until the closure callback is called.
+  // The closure callback is called from the video encoder thread as soon as
+  // the encoder is done with the frame; it does not mean that the encoded frame
+  // has been sent out.
+  // Once the encoded frame is ready the frame_encoded_callback is called.
+  bool EncodeVideoFrame(const I420VideoFrame* video_frame,
+                        const base::TimeTicks& capture_time,
+                        const FrameEncodedCallback& frame_encoded_callback,
+                        const base::Closure frame_release_callback);
+
+ protected:
+  struct CodecDynamicConfig {  // Snapshot of main-thread state passed to the encoder thread.
+    bool key_frame_requested;
+    uint8 latest_frame_id_to_reference;
+    int bit_rate;
+  };
+
+  // The actual encode, called from the video encoder thread.
+  void EncodeVideoFrameEncoderThread(
+      const I420VideoFrame* video_frame,
+      const base::TimeTicks& capture_time,
+      const CodecDynamicConfig& dynamic_config,
+      const FrameEncodedCallback& frame_encoded_callback,
+      const base::Closure frame_release_callback);
+
+  // The following functions are called from the main cast thread.
+  virtual void SetBitRate(int new_bit_rate) OVERRIDE;
+  virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
+  virtual void GenerateKeyFrame() OVERRIDE;
+  virtual void LatestFrameIdToReference(uint8 frame_id) OVERRIDE;
+  virtual int NumberOfSkippedFrames() const OVERRIDE;
+
+ private:
+  const VideoSenderConfig video_config_;
+  scoped_refptr<CastThread> cast_thread_;
+  scoped_ptr<Vp8Encoder> vp8_encoder_;
+  CodecDynamicConfig dynamic_config_;  // Mutated on the main thread only; copied per encode task.
+  bool skip_next_frame_;
+  int skip_count_;  // Number of frames dropped via SkipNextFrame().
+
+  DISALLOW_COPY_AND_ASSIGN(VideoEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/video_sender/video_encoder_unittest.cc b/chromium/media/cast/video_sender/video_encoder_unittest.cc
new file mode 100644
index 00000000000..d18a043b73b
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder_unittest.cc
@@ -0,0 +1,245 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/video_sender/video_encoder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+static void ReleaseFrame(const I420VideoFrame* frame) {  // No-op release callback for the tests.
+  // Empty since we in this test send in the same frame.
+}
+
+class TestVideoEncoderCallback :
+    public base::RefCountedThreadSafe<TestVideoEncoderCallback> {  // Verifies each delivered encoded frame against preset expectations.
+ public:
+  TestVideoEncoderCallback() {}
+
+  // Set what the next DeliverEncodedVideoFrame() call must observe.
+  void SetExpectedResult(bool expected_key_frame,
+                         uint8 expected_frame_id,
+                         uint8 expected_last_referenced_frame_id,
+                         const base::TimeTicks& expected_capture_time) {
+    expected_key_frame_ = expected_key_frame;
+    expected_frame_id_ = expected_frame_id;
+    expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
+    expected_capture_time_ = expected_capture_time;
+  }
+
+  // Bound as the VideoEncoder::FrameEncodedCallback in the tests.
+  void DeliverEncodedVideoFrame(scoped_ptr<EncodedVideoFrame> encoded_frame,
+                                const base::TimeTicks& capture_time) {
+    EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+    EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
+    EXPECT_EQ(expected_last_referenced_frame_id_,
+              encoded_frame->last_referenced_frame_id);
+    EXPECT_EQ(expected_capture_time_, capture_time);
+  }
+
+ private:
+  bool expected_key_frame_;
+  uint8 expected_frame_id_;
+  uint8 expected_last_referenced_frame_id_;
+  base::TimeTicks expected_capture_time_;
+};
+
+class VideoEncoderTest : public ::testing::Test {
+ protected:
+  VideoEncoderTest()
+      : pixels_(320 * 240, 123),  // One shared buffer of constant value 123 for all planes.
+        test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+    video_config_.sender_ssrc = 1;
+    video_config_.incoming_feedback_ssrc = 2;
+    video_config_.rtp_payload_type = 127;
+    video_config_.use_external_encoder = false;
+    video_config_.width = 320;
+    video_config_.height = 240;
+    video_config_.max_bitrate = 5000000;
+    video_config_.min_bitrate = 1000000;
+    video_config_.start_bitrate = 2000000;
+    video_config_.max_qp = 56;
+    video_config_.min_qp = 0;
+    video_config_.max_frame_rate = 30;
+    video_config_.max_number_of_video_buffers_used = 3;
+    video_config_.codec = kVp8;
+    video_frame_.width = 320;
+    video_frame_.height = 240;
+    video_frame_.y_plane.stride = video_frame_.width;
+    video_frame_.y_plane.length = video_frame_.width;  // NOTE(review): per-row length -- confirm I420VideoFrame's expected units.
+    video_frame_.y_plane.data = &(pixels_[0]);
+    video_frame_.u_plane.stride = video_frame_.width / 2;
+    video_frame_.u_plane.length = video_frame_.width / 2;
+    video_frame_.u_plane.data = &(pixels_[0]);  // All three planes alias the same buffer.
+    video_frame_.v_plane.stride = video_frame_.width / 2;
+    video_frame_.v_plane.length = video_frame_.width / 2;
+    video_frame_.v_plane.data = &(pixels_[0]);
+  }
+
+  ~VideoEncoderTest() {}
+
+  virtual void SetUp() {
+    task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+    cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+                                  task_runner_, task_runner_);  // All five cast threads share one fake runner.
+  }
+
+  // Create the encoder under test with the given unacked-frames budget.
+  void Configure(uint8 max_unacked_frames) {
+    video_encoder_= new VideoEncoder(cast_thread_, video_config_,
+        max_unacked_frames);
+    video_encoder_controller_ = video_encoder_.get();
+  }
+
+  base::SimpleTestTickClock testing_clock_;
+  std::vector<uint8> pixels_;
+  scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
+  VideoSenderConfig video_config_;
+  scoped_refptr<test::FakeTaskRunner> task_runner_;
+  scoped_refptr<VideoEncoder> video_encoder_;
+  VideoEncoderController* video_encoder_controller_;
+  I420VideoFrame video_frame_;
+
+  scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {  // Acks stop after frame 2; later frames keep referencing it.
+  Configure(3);
+
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+                 test_video_encoder_callback_.get());
+
+  base::TimeTicks capture_time;
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);  // First frame is a key frame.
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  video_encoder_controller_->LatestFrameIdToReference(0);
+  test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  video_encoder_controller_->LatestFrameIdToReference(1);
+  test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(2);  // Last ack the encoder will see.
+
+  for (int i = 3; i < 6; ++i) {
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);  // Still references frame 2.
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    task_runner_->RunTasks();
+  }
+}
+
+TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {  // Larger unacked budget (6); acks stop after frame 2.
+  Configure(6);
+
+  base::TimeTicks capture_time;
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+                 test_video_encoder_callback_.get());
+
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);  // First frame is a key frame.
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(0);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(1);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(2);  // Last ack the encoder will see.
+
+  for (int i = 3; i < 9; ++i) {
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);  // Still references frame 2.
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    task_runner_->RunTasks();
+  }
+}
+
+TEST_F(VideoEncoderTest, EncodePattern60fps200msDelayRunningOutOfAck) {  // 12-frame budget; acks stop after frame 4.
+  Configure(12);
+
+  base::TimeTicks capture_time;
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+                 test_video_encoder_callback_.get());
+
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);  // First frame is a key frame.
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(0);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(1);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(2);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(3);
+  capture_time += base::TimeDelta::FromMilliseconds(33);
+  test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
+  EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+      frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+  task_runner_->RunTasks();
+
+  video_encoder_controller_->LatestFrameIdToReference(4);  // Last ack the encoder will see.
+
+  for (int i = 5; i < 17; ++i) {  // NOTE(review): capture_time is not advanced here, unlike the sibling tests -- confirm intentional.
+    test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    task_runner_->RunTasks();
+  }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/video_sender.cc b/chromium/media/cast/video_sender/video_sender.cc
new file mode 100644
index 00000000000..1b422388324
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_sender.cc
@@ -0,0 +1,346 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_sender.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/video_encoder.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;  // Lower bound for all delayed task scheduling below.
+
+class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {  // Routes RTCP feedback into the owning VideoSender.
+ public:
+  explicit LocalRtcpVideoSenderFeedback(VideoSender* video_sender)
+      : video_sender_(video_sender) {
+  }
+
+  virtual void OnReceivedSendReportRequest() OVERRIDE {}
+
+  virtual void OnReceivedReportBlock(
+      const RtcpReportBlock& report_block) OVERRIDE {}
+
+  virtual void OnReceivedRpsi(uint8 payload_type,
+                              uint64 picture_id) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedNackRequest(
+      const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedIntraFrameRequest() OVERRIDE {
+    video_sender_->OnReceivedIntraFrameRequest();
+  }
+
+  virtual void OnReceivedCastFeedback(
+      const RtcpCastMessage& cast_feedback) OVERRIDE {
+    video_sender_->OnReceivedCastFeedback(cast_feedback);
+  }
+
+ private:
+  VideoSender* video_sender_;  // Not owned; outlives this adapter.
+};
+
+class LocalRtpVideoSenderStatistics : public RtpSenderStatistics {  // Adapts RtpSender stats to the RtpSenderStatistics interface.
+ public:
+  explicit LocalRtpVideoSenderStatistics(RtpSender* rtp_sender)
+     : rtp_sender_(rtp_sender) {
+  }
+
+  virtual void GetStatistics(const base::TimeTicks& now,
+                             RtcpSenderInfo* sender_info) OVERRIDE {
+    rtp_sender_->RtpStatistics(now, sender_info);
+  }
+
+ private:
+  RtpSender* rtp_sender_;  // Not owned.
+};
+
+VideoSender::VideoSender(
+    scoped_refptr<CastThread> cast_thread,
+    const VideoSenderConfig& video_config,
+    VideoEncoderController* const video_encoder_controller,
+    PacedPacketSender* const paced_packet_sender)
+    : incoming_feedback_ssrc_(video_config.incoming_feedback_ssrc),
+      rtp_max_delay_(
+          base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
+      max_frame_rate_(video_config.max_frame_rate),
+      cast_thread_(cast_thread),
+      rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
+      rtp_sender_(new RtpSender(NULL, &video_config, paced_packet_sender)),
+      last_acked_frame_id_(-1),  // -1 == nothing acked/sent yet.
+      last_sent_frame_id_(-1),
+      last_sent_key_frame_id_(-1),
+      duplicate_ack_(0),
+      last_skip_count_(0),
+      congestion_control_(video_config.congestion_control_back_off,
+                          video_config.max_bitrate,
+                          video_config.min_bitrate,
+                          video_config.start_bitrate),
+      clock_(&default_tick_clock_),
+      weak_factory_(this) {
+  max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
+      video_config.max_frame_rate / 1000);  // Max RTP delay expressed in frames at max frame rate.
+  DCHECK(max_unacked_frames_ > 0) << "Invalid argument";
+
+  rtp_video_sender_statistics_.reset(
+      new LocalRtpVideoSenderStatistics(rtp_sender_.get()));
+
+  if (video_config.use_external_encoder) {  // Caller supplies the encoder controller.
+    DCHECK(video_encoder_controller) << "Invalid argument";
+    video_encoder_controller_ = video_encoder_controller;
+  } else {
+    video_encoder_ = new VideoEncoder(cast_thread, video_config,
+                                      max_unacked_frames_);
+    video_encoder_controller_ = video_encoder_.get();
+  }
+  rtcp_.reset(new Rtcp(
+      rtcp_feedback_.get(),
+      paced_packet_sender,
+      rtp_video_sender_statistics_.get(),
+      NULL,
+      video_config.rtcp_mode,
+      base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+      true,
+      video_config.sender_ssrc,
+      video_config.rtcp_c_name));
+
+  rtcp_->SetRemoteSSRC(video_config.incoming_feedback_ssrc);
+  ScheduleNextRtcpReport();
+  ScheduleNextResendCheck();
+  ScheduleNextSkippedFramesCheck();
+}
+
+VideoSender::~VideoSender() {}
+
+void VideoSender::InsertRawVideoFrame(
+    const I420VideoFrame* video_frame,
+    const base::TimeTicks& capture_time,
+    const base::Closure callback) {  // Runs once the encoder is done with |video_frame|.
+  DCHECK(video_encoder_.get()) << "Invalid state";  // Only valid with the internal encoder.
+
+  if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
+      base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
+          weak_factory_.GetWeakPtr()), callback)) {
+    VLOG(1) << "Failed to InsertRawVideoFrame";
+  }
+}
+
+void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+                                        const base::TimeTicks& capture_time,
+                                        const base::Closure callback) {
+  DCHECK(!video_encoder_.get()) << "Invalid state";  // Only valid with an external encoder.
+  DCHECK(encoded_frame) << "Invalid argument";
+
+  SendEncodedVideoFrame(encoded_frame, capture_time);
+  callback.Run();  // Frame already handed to the RTP sender; release synchronously.
+}
+
+void VideoSender::SendEncodedVideoFrameMainThread(
+    scoped_ptr<EncodedVideoFrame> video_frame,
+    const base::TimeTicks& capture_time) {  // Trampoline from the internal encoder's callback.
+  SendEncodedVideoFrame(video_frame.get(), capture_time);
+}
+
+void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+                                        const base::TimeTicks& capture_time) {
+  last_send_time_ = clock_->NowTicks();  // Consumed by the resend scheduling logic.
+  rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+  if (encoded_frame->key_frame) {
+    last_sent_key_frame_id_ = encoded_frame->frame_id;
+  }
+  last_sent_frame_id_ = encoded_frame->frame_id;
+  UpdateFramesInFlight();  // May toggle frame skipping.
+}
+
+void VideoSender::OnReceivedIntraFrameRequest() {
+  if (last_sent_key_frame_id_ != -1) {
+    uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+        static_cast<uint8>(last_sent_key_frame_id_);  // Modulo-256 distance since the last key frame.
+    if (frames_in_flight < (max_unacked_frames_ - 1)) return;  // A key frame was sent recently; ignore the request.
+  }
+  video_encoder_controller_->GenerateKeyFrame();
+  last_acked_frame_id_ = -1;  // Reset ack/send tracking around the forced key frame.
+  last_sent_frame_id_ = -1;
+}
+
+void VideoSender::IncomingRtcpPacket(const uint8* packet, int length,
+                                     const base::Closure callback) {
+  rtcp_->IncomingRtcpPacket(packet, length);
+  cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);  // Signal completion on the main thread.
+}
+
+void VideoSender::ScheduleNextRtcpReport() {
+  base::TimeDelta time_to_next =
+      rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));  // Never schedule in the past.
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+void VideoSender::SendRtcpReport() {
+  rtcp_->SendRtcpReport(incoming_feedback_ssrc_);
+  ScheduleNextRtcpReport();  // Self-rescheduling loop.
+}
+
+void VideoSender::ScheduleNextResendCheck() {
+  base::TimeDelta time_to_next;
+  if (last_send_time_.is_null()) {  // Nothing sent yet; wait one full max-delay period.
+    time_to_next = rtp_max_delay_;
+  } else {
+    time_to_next = last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+  }
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));  // Never schedule in the past.
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::ResendCheck, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+// Periodic check (scheduled via ScheduleNextResendCheck) that recovers when
+// no acks arrive: forces a key frame if nothing was ever acked, otherwise
+// resends the first unacked frame.
+void VideoSender::ResendCheck() {
+  if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
+    if (last_acked_frame_id_ == -1) {
+      // We have not received any ack, send a key frame.
+      video_encoder_controller_->GenerateKeyFrame();
+      last_acked_frame_id_ = -1;
+      last_sent_frame_id_ = -1;
+      UpdateFramesInFlight();
+    } else {
+      ResendFrame(static_cast<uint8>(last_acked_frame_id_ + 1));
+    }
+  }
+  ScheduleNextResendCheck();
+}
+
+void VideoSender::ScheduleNextSkippedFramesCheck() {
+  base::TimeDelta time_to_next;
+  if (last_checked_skip_count_time_.is_null()) {  // First check after construction.
+    time_to_next =
+        base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+  } else {
+    time_to_next = last_checked_skip_count_time_ - clock_->NowTicks() +
+        base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+  }
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));  // Never schedule in the past.
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::SkippedFramesCheck, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+void VideoSender::SkippedFramesCheck() {
+  int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
+  if (skip_count - last_skip_count_ >
+      kSkippedFramesThreshold * max_frame_rate_) {  // Threshold scales with the frame rate.
+      // TODO(pwestin): Propagate this up to the application.
+  }
+  last_skip_count_ = skip_count;
+  last_checked_skip_count_time_ = clock_->NowTicks();
+  ScheduleNextSkippedFramesCheck();  // Self-rescheduling loop.
+}
+
+void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+  base::TimeDelta rtt;
+  base::TimeDelta avg_rtt;
+  base::TimeDelta min_rtt;
+  base::TimeDelta max_rtt;
+
+  if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+    // Don't use a RTT lower than our average.
+    rtt = std::max(rtt, avg_rtt);
+  } else {
+    // We have no measured value use default.
+    rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
+  }
+  if (cast_feedback.missing_frames_and_packets_.empty()) {
+    // No lost packets.
+    int resend_frame = -1;
+    if (last_sent_frame_id_ == -1) return;  // Nothing sent yet; nothing to do.
+
+    video_encoder_controller_->LatestFrameIdToReference(
+        cast_feedback.ack_frame_id_);
+
+    if (static_cast<uint8>(last_acked_frame_id_ + 1) ==
+        cast_feedback.ack_frame_id_) {  // In-order ack: let congestion control raise the bitrate.
+      uint32 new_bitrate = 0;
+      if (congestion_control_.OnAck(rtt, &new_bitrate)) {
+        video_encoder_controller_->SetBitRate(new_bitrate);
+      }
+    }
+    if (last_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+        // We only count duplicate ACKs when we have sent newer frames.
+        IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
+      duplicate_ack_++;
+    } else {
+      duplicate_ack_ = 0;
+    }
+    if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {  // Fires on the 2nd duplicate and every 3rd thereafter.
+      // Resend last ACK + 1 frame.
+      resend_frame = static_cast<uint8>(last_acked_frame_id_ + 1);
+    }
+    if (resend_frame != -1) {
+      ResendFrame(static_cast<uint8>(resend_frame));
+    }
+  } else {
+    rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
+    last_send_time_ = clock_->NowTicks();
+
+    uint32 new_bitrate = 0;
+    if (congestion_control_.OnNack(rtt, &new_bitrate)) {  // Loss reported: let congestion control lower the bitrate.
+      video_encoder_controller_->SetBitRate(new_bitrate);
+    }
+  }
+  ReceivedAck(cast_feedback.ack_frame_id_);
+}
+
+void VideoSender::ReceivedAck(uint8 acked_frame_id) {
+  last_acked_frame_id_ = acked_frame_id;
+  UpdateFramesInFlight();  // May re-enable encoding if the in-flight window shrank.
+}
+
+void VideoSender::UpdateFramesInFlight() {
+  if (last_sent_frame_id_ != -1) {
+    uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+        static_cast<uint8>(last_acked_frame_id_);  // Modulo-256 distance, matching the 8-bit frame ids.
+    if (frames_in_flight >= max_unacked_frames_) {
+      video_encoder_controller_->SkipNextFrame(true);  // Window full: stop encoding until acks arrive.
+      return;
+    }
+  }
+  video_encoder_controller_->SkipNextFrame(false);
+}
+
+void VideoSender::ResendFrame(uint8 resend_frame_id) {
+  MissingFramesAndPacketsMap missing_frames_and_packets;
+  PacketIdSet missing;  // Left empty -- presumably "all packets of the frame"; confirm ResendPackets' convention.
+  missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
+  rtp_sender_->ResendPackets(missing_frames_and_packets);
+  last_send_time_ = clock_->NowTicks();  // Resends count as sends for the resend scheduler.
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/video_sender.gypi b/chromium/media/cast/video_sender/video_sender.gypi
new file mode 100644
index 00000000000..9499066165f
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_sender.gypi
@@ -0,0 +1,31 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'codecs/vp8/vp8_encoder.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'video_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'video_encoder.h',
+ 'video_encoder.cc',
+ 'video_sender.h',
+ 'video_sender.cc',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
+ '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ 'congestion_control',
+ 'cast_vp8_encoder',
+ ],
+ },
+ ],
+}
diff --git a/chromium/media/cast/video_sender/video_sender.h b/chromium/media/cast/video_sender/video_sender.h
new file mode 100644
index 00000000000..9098e975c4f
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_sender.h
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+class VideoEncoder;
+class LocalRtcpVideoSenderFeedback;
+class LocalRtpVideoSenderStatistics;
+class LocalVideoEncoderCallback;
+class PacedPacketSender;
+
+// Not thread safe. Only called from the main cast thread.
+// This class owns all objects related to sending video, objects that create RTP
+// packets, congestion control, video encoder, parsing and sending of
+// RTCP packets.
+// Additionally it posts a bunch of delayed tasks to the main thread for various
+// timeouts.
+class VideoSender : public base::NonThreadSafe,
+ public base::SupportsWeakPtr<VideoSender> {
+ public:
+ VideoSender(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacedPacketSender* const paced_packet_sender);
+
+ virtual ~VideoSender();
+
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the video encoder thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ void InsertRawVideoFrame(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback);
+
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the main thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback);
+
+ // Only called from the main cast thread.
+ void IncomingRtcpPacket(const uint8* packet, int length,
+ const base::Closure callback);
+
+ void set_clock(base::TickClock* clock) {
+ clock_ = clock;
+ congestion_control_.set_clock(clock);
+ rtcp_->set_clock(clock);
+ rtp_sender_->set_clock(clock);
+ }
+
+ protected:
+ // Protected for testability.
+ void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+
+ private:
+ friend class LocalRtcpVideoSenderFeedback;
+
+  // Schedule when we should send the next RTCP report,
+ // via a PostDelayedTask to the main cast thread.
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport();
+
+ // Schedule when we should check that we have received an acknowledgment, or a
+ // loss report from our remote peer. If we have not heard back from our remote
+ // peer we speculatively resend our oldest unacknowledged frame (the whole
+ // frame). Note for this to happen we need to lose all pending packets (in
+ // normal operation 3 full frames), hence this is the last resort to prevent
+ // us getting stuck after a long outage.
+ void ScheduleNextResendCheck();
+ void ResendCheck();
+
+  // Monitor how many frames are silently dropped by the video sender
+ // per time unit.
+ void ScheduleNextSkippedFramesCheck();
+ void SkippedFramesCheck();
+
+ void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
+ void OnReceivedIntraFrameRequest();
+ void ResendFrame(uint8 resend_frame_id);
+ void ReceivedAck(uint8 acked_frame_id);
+ void UpdateFramesInFlight();
+
+ void SendEncodedVideoFrameMainThread(
+ scoped_ptr<EncodedVideoFrame> video_frame,
+ const base::TimeTicks& capture_time);
+
+ const uint32 incoming_feedback_ssrc_;
+ const base::TimeDelta rtp_max_delay_;
+ const int max_frame_rate_;
+
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
+ scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
+ scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<Rtcp> rtcp_;
+ scoped_ptr<RtpSender> rtp_sender_;
+ VideoEncoderController* video_encoder_controller_;
+ uint8 max_unacked_frames_;
+ int last_acked_frame_id_;
+ int last_sent_frame_id_;
+ int last_sent_key_frame_id_;
+ int duplicate_ack_;
+ base::TimeTicks last_send_time_;
+ base::TimeTicks last_checked_skip_count_time_;
+ int last_skip_count_;
+ CongestionControl congestion_control_;
+
+ base::DefaultTickClock default_tick_clock_;
+ base::TickClock* clock_;
+
+ base::WeakPtrFactory<VideoSender> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
diff --git a/chromium/media/cast/video_sender/video_sender_unittest.cc b/chromium/media/cast/video_sender/video_sender_unittest.cc
new file mode 100644
index 00000000000..72582a7ff3c
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_sender_unittest.cc
@@ -0,0 +1,204 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/video_sender/mock_video_encoder_controller.h"
+#include "media/cast/video_sender/video_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using testing::_;
+
+class PeerVideoSender : public VideoSender {
+ public:
+ PeerVideoSender(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ VideoEncoderController* const video_encoder_controller,
+ PacedPacketSender* const paced_packet_sender)
+ : VideoSender(cast_thread, video_config, video_encoder_controller,
+ paced_packet_sender) {
+ }
+ using VideoSender::OnReceivedCastFeedback;
+};
+
+static void ReleaseVideoFrame(const I420VideoFrame* frame) {
+ delete [] frame->y_plane.data;
+ delete [] frame->u_plane.data;
+ delete [] frame->v_plane.data;
+ delete frame;
+}
+
+static void ReleaseEncodedFrame(const EncodedVideoFrame* frame) {
+ // Do nothing.
+}
+
+class VideoSenderTest : public ::testing::Test {
+ protected:
+ VideoSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ ~VideoSenderTest() {}
+
+ void InitEncoder(bool external) {
+ VideoSenderConfig video_config;
+ video_config.sender_ssrc = 1;
+ video_config.incoming_feedback_ssrc = 2;
+ video_config.rtp_payload_type = 127;
+ video_config.use_external_encoder = external;
+ video_config.width = 320;
+ video_config.height = 240;
+ video_config.max_bitrate = 5000000;
+ video_config.min_bitrate = 1000000;
+ video_config.start_bitrate = 1000000;
+ video_config.max_qp = 56;
+ video_config.min_qp = 0;
+ video_config.max_frame_rate = 30;
+ video_config.max_number_of_video_buffers_used = 3;
+ video_config.codec = kVp8;
+
+ if (external) {
+ video_sender_.reset(new PeerVideoSender(cast_thread_, video_config,
+ &mock_video_encoder_controller_, &mock_transport_));
+ } else {
+ video_sender_.reset(new PeerVideoSender(cast_thread_, video_config, NULL,
+ &mock_transport_));
+ }
+ video_sender_->set_clock(&testing_clock_);
+ }
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
+ task_runner_, task_runner_);
+ }
+
+ I420VideoFrame* AllocateNewVideoFrame() {
+ I420VideoFrame* video_frame = new I420VideoFrame();
+ video_frame->width = 320;
+ video_frame->height = 240;
+
+ video_frame->y_plane.stride = video_frame->width;
+ video_frame->y_plane.length = video_frame->width;
+ video_frame->y_plane.data =
+ new uint8[video_frame->width * video_frame->height];
+ memset(video_frame->y_plane.data, 123,
+ video_frame->width * video_frame->height);
+ video_frame->u_plane.stride = video_frame->width / 2;
+ video_frame->u_plane.length = video_frame->width / 2;
+ video_frame->u_plane.data =
+ new uint8[video_frame->width * video_frame->height / 4];
+ memset(video_frame->u_plane.data, 123,
+ video_frame->width * video_frame->height / 4);
+ video_frame->v_plane.stride = video_frame->width / 2;
+ video_frame->v_plane.length = video_frame->width / 2;
+ video_frame->v_plane.data =
+ new uint8[video_frame->width * video_frame->height / 4];
+ memset(video_frame->v_plane.data, 123,
+ video_frame->width * video_frame->height / 4);
+ return video_frame;
+ }
+
+ MockVideoEncoderController mock_video_encoder_controller_;
+ base::SimpleTestTickClock testing_clock_;
+ MockPacedPacketSender mock_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<PeerVideoSender> video_sender_;
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+TEST_F(VideoSenderTest, BuiltInEncoder) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+
+ InitEncoder(false);
+ I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+ base::TimeTicks capture_time;
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ task_runner_->RunTasks();
+}
+
+TEST_F(VideoSenderTest, ExternalEncoder) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_video_encoder_controller_, SkipNextFrame(false)).Times(1);
+ InitEncoder(true);
+
+ EncodedVideoFrame video_frame;
+ base::TimeTicks capture_time;
+
+ video_frame.codec = kVp8;
+ video_frame.key_frame = true;
+ video_frame.frame_id = 0;
+ video_frame.last_referenced_frame_id = 0;
+ video_frame.data.insert(video_frame.data.begin(), 123, 1000);
+
+ video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
+ base::Bind(&ReleaseEncodedFrame, &video_frame));
+}
+
+TEST_F(VideoSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+ InitEncoder(false);
+
+ // Make sure that we send at least one RTCP packet.
+ base::TimeDelta max_rtcp_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
+
+ testing_clock_.Advance(max_rtcp_timeout);
+ task_runner_->RunTasks();
+}
+
+TEST_F(VideoSenderTest, ResendTimer) {
+ EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(2);
+ EXPECT_CALL(mock_transport_, ResendPacket(_, _)).Times(1);
+
+ InitEncoder(false);
+
+ I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+ base::TimeTicks capture_time;
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ task_runner_->RunTasks();
+
+ // ACK the key frame.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+ video_frame = AllocateNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+ base::Bind(&ReleaseVideoFrame, video_frame));
+
+ task_runner_->RunTasks();
+
+ base::TimeDelta max_resend_timeout =
+ base::TimeDelta::FromMilliseconds(1 + kDefaultRtpMaxDelayMs);
+
+ // Make sure that we do a re-send.
+ testing_clock_.Advance(max_resend_timeout);
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
+
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index da11442a37e..33717e03a58 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -6,9 +6,13 @@
#include <vector>
+#include "base/base64.h"
+#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/values.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
#include "media/base/audio_decoder_config.h"
@@ -26,6 +30,8 @@ enum ClearBytesBufferSel {
kDstContainsClearBytes
};
+typedef std::vector<std::pair<std::string, std::string> > JWKKeys;
+
static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
const ClearBytesBufferSel sel,
const uint8* src,
@@ -43,6 +49,105 @@ static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
}
}
+// Processes a JSON Web Key to extract the key id and key value. Adds the
+// id/value pair to |jwk_keys| and returns true on success.
+static bool ProcessSymmetricKeyJWK(const DictionaryValue& jwk,
+ JWKKeys* jwk_keys) {
+ // A symmetric keys JWK looks like the following in JSON:
+ // { "kty":"oct",
+ // "kid":"AAECAwQFBgcICQoLDA0ODxAREhM=",
+ // "k":"FBUWFxgZGhscHR4fICEiIw==" }
+ // There may be other properties specified, but they are ignored.
+ // Ref: http://tools.ietf.org/html/draft-ietf-jose-json-web-key-14
+ // and:
+ // http://tools.ietf.org/html/draft-jones-jose-json-private-and-symmetric-key-00
+
+ // Have found a JWK, start by checking that it is a symmetric key.
+ std::string type;
+ if (!jwk.GetString("kty", &type) || type != "oct") {
+ DVLOG(1) << "JWK is not a symmetric key";
+ return false;
+ }
+
+ // Get the key id and actual key parameters.
+ std::string encoded_key_id;
+ std::string encoded_key;
+ if (!jwk.GetString("kid", &encoded_key_id)) {
+ DVLOG(1) << "Missing 'kid' parameter";
+ return false;
+ }
+ if (!jwk.GetString("k", &encoded_key)) {
+ DVLOG(1) << "Missing 'k' parameter";
+ return false;
+ }
+
+ // Key ID and key are base64-encoded strings, so decode them.
+ // TODO(jrummell): The JWK spec and the EME spec don't say that 'kid' must be
+ // base64-encoded (they don't say anything at all). Verify with the EME spec.
+ std::string decoded_key_id;
+ std::string decoded_key;
+ if (!base::Base64Decode(encoded_key_id, &decoded_key_id) ||
+ decoded_key_id.empty()) {
+ DVLOG(1) << "Invalid 'kid' value";
+ return false;
+ }
+ if (!base::Base64Decode(encoded_key, &decoded_key) ||
+ decoded_key.length() !=
+ static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
+ DVLOG(1) << "Invalid length of 'k' " << decoded_key.length();
+ return false;
+ }
+
+ // Add the decoded key ID and the decoded key to the list.
+ jwk_keys->push_back(std::make_pair(decoded_key_id, decoded_key));
+ return true;
+}
+
+// Extracts the JSON Web Keys from a JSON Web Key Set. If |input| looks like
+// a valid JWK Set, then true is returned and |jwk_keys| is updated to contain
+// the list of keys found. Otherwise return false.
+static bool ExtractJWKKeys(const std::string& input, JWKKeys* jwk_keys) {
+ // TODO(jrummell): The EME spec references a smaller set of allowed ASCII
+ // values. Verify with spec that the smaller character set is needed.
+ if (!IsStringASCII(input))
+ return false;
+
+ scoped_ptr<Value> root(base::JSONReader().ReadToValue(input));
+ if (!root.get() || root->GetType() != Value::TYPE_DICTIONARY)
+ return false;
+
+ // A JSON Web Key Set looks like the following in JSON:
+ // { "keys": [ JWK1, JWK2, ... ] }
+ // (See ProcessSymmetricKeyJWK() for description of JWK.)
+ // There may be other properties specified, but they are ignored.
+ // Locate the set from the dictionary.
+ DictionaryValue* dictionary = static_cast<DictionaryValue*>(root.get());
+ ListValue* list_val = NULL;
+ if (!dictionary->GetList("keys", &list_val)) {
+ DVLOG(1) << "Missing 'keys' parameter or not a list in JWK Set";
+ return false;
+ }
+
+ // Create a local list of keys, so that |jwk_keys| only gets updated on
+ // success.
+ JWKKeys local_keys;
+ for (size_t i = 0; i < list_val->GetSize(); ++i) {
+ DictionaryValue* jwk = NULL;
+ if (!list_val->GetDictionary(i, &jwk)) {
+ DVLOG(1) << "Unable to access 'keys'[" << i << "] in JWK Set";
+ return false;
+ }
+ if (!ProcessSymmetricKeyJWK(*jwk, &local_keys)) {
+ DVLOG(1) << "Error from 'keys'[" << i << "]";
+ return false;
+ }
+ }
+
+ // Successfully processed all JWKs in the set.
+ jwk_keys->swap(local_keys);
+ return true;
+}
+
// Decrypts |input| using |key|. Returns a DecoderBuffer with the decrypted
// data if decryption succeeded or NULL if decryption failed.
static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
@@ -67,10 +172,11 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
const int data_offset = input.decrypt_config()->data_offset();
const char* sample =
reinterpret_cast<const char*>(input.data() + data_offset);
- int sample_size = input.data_size() - data_offset;
+ DCHECK_GT(input.data_size(), data_offset);
+ size_t sample_size = static_cast<size_t>(input.data_size() - data_offset);
- DCHECK_GT(sample_size, 0) << "No sample data to be decrypted.";
- if (sample_size <= 0)
+ DCHECK_GT(sample_size, 0U) << "No sample data to be decrypted.";
+ if (sample_size == 0)
return NULL;
if (input.decrypt_config()->subsamples().empty()) {
@@ -90,13 +196,18 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
const std::vector<SubsampleEntry>& subsamples =
input.decrypt_config()->subsamples();
- int total_clear_size = 0;
- int total_encrypted_size = 0;
+ size_t total_clear_size = 0;
+ size_t total_encrypted_size = 0;
for (size_t i = 0; i < subsamples.size(); i++) {
total_clear_size += subsamples[i].clear_bytes;
total_encrypted_size += subsamples[i].cypher_bytes;
+ // Check for overflow. This check is valid because *_size is unsigned.
+ DCHECK(total_clear_size >= subsamples[i].clear_bytes);
+ if (total_encrypted_size < subsamples[i].cypher_bytes)
+ return NULL;
}
- if (total_clear_size + total_encrypted_size != sample_size) {
+ size_t total_size = total_clear_size + total_encrypted_size;
+ if (total_size < total_clear_size || total_size != sample_size) {
DVLOG(1) << "Subsample sizes do not equal input size";
return NULL;
}
@@ -170,41 +281,65 @@ void AesDecryptor::AddKey(const uint8* key,
CHECK(key);
CHECK_GT(key_length, 0);
+ // AddKey() is called from update(), where the key(s) are passed as a JSON
+ // Web Key (JWK) set. Each JWK needs to be a symmetric key ('kty' = "oct"),
+ // with 'kid' being the base64-encoded key id, and 'k' being the
+ // base64-encoded key.
+ //
+ // For backwards compatibility with v0.1b of the spec (where |key| is the raw
+ // key and |init_data| is the key id), if |key| is not valid JSON, then
+ // attempt to process it as a raw key.
+
// TODO(xhwang): Add |session_id| check after we figure out how:
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16550
- if (key_length != DecryptConfig::kDecryptionKeySize) {
- DVLOG(1) << "Invalid key length: " << key_length;
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
- // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
- // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
- static const uint8 kDummyInitData[1] = { 0 };
- if (!init_data) {
- init_data = kDummyInitData;
- init_data_length = arraysize(kDummyInitData);
- }
+ std::string key_string(reinterpret_cast<const char*>(key), key_length);
+ JWKKeys jwk_keys;
+ if (ExtractJWKKeys(key_string, &jwk_keys)) {
+ // Since |key| represents valid JSON, init_data must be empty.
+ DCHECK(!init_data);
+ DCHECK_EQ(init_data_length, 0);
- // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
- // compliant later (http://crbug.com/123262, http://crbug.com/123265).
- std::string key_id_string(reinterpret_cast<const char*>(init_data),
- init_data_length);
- std::string key_string(reinterpret_cast<const char*>(key) , key_length);
- scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
- if (!decryption_key) {
- DVLOG(1) << "Could not create key.";
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
+ // Make sure that at least one key was extracted.
+ if (jwk_keys.empty()) {
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ for (JWKKeys::iterator it = jwk_keys.begin() ; it != jwk_keys.end(); ++it) {
+ if (!AddDecryptionKey(it->first, it->second)) {
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ }
+ } else {
+ // v0.1b backwards compatibility support.
+ // TODO(jrummell): Remove this code once v0.1b no longer supported.
- if (!decryption_key->Init()) {
- DVLOG(1) << "Could not initialize decryption key.";
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
+ if (key_string.length() !=
+ static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
+ DVLOG(1) << "Invalid key length: " << key_string.length();
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+
+ // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
+ // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
+ static const uint8 kDummyInitData[1] = {0};
+ if (!init_data) {
+ init_data = kDummyInitData;
+ init_data_length = arraysize(kDummyInitData);
+ }
- SetKey(key_id_string, decryption_key.Pass());
+ // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
+ // compliant later (http://crbug.com/123262, http://crbug.com/123265).
+ std::string key_id_string(reinterpret_cast<const char*>(init_data),
+ init_data_length);
+ if (!AddDecryptionKey(key_id_string, key_string)) {
+ // Error logged in AddDecryptionKey()
+ key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+ }
if (!new_audio_key_cb_.is_null())
new_audio_key_cb_.Run();
@@ -306,8 +441,19 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
NOTREACHED() << "AesDecryptor does not support audio/video decoding";
}
-void AesDecryptor::SetKey(const std::string& key_id,
- scoped_ptr<DecryptionKey> decryption_key) {
+bool AesDecryptor::AddDecryptionKey(const std::string& key_id,
+ const std::string& key_string) {
+ scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
+ if (!decryption_key) {
+ DVLOG(1) << "Could not create key.";
+ return false;
+ }
+
+ if (!decryption_key->Init()) {
+ DVLOG(1) << "Could not initialize decryption key.";
+ return false;
+ }
+
base::AutoLock auto_lock(key_map_lock_);
KeyMap::iterator found = key_map_.find(key_id);
if (found != key_map_.end()) {
@@ -315,6 +461,7 @@ void AesDecryptor::SetKey(const std::string& key_id,
key_map_.erase(found);
}
key_map_[key_id] = decryption_key.release();
+ return true;
}
AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
diff --git a/chromium/media/cdm/aes_decryptor.h b/chromium/media/cdm/aes_decryptor.h
index fda5a0facab..3ab4bc0f9f4 100644
--- a/chromium/media/cdm/aes_decryptor.h
+++ b/chromium/media/cdm/aes_decryptor.h
@@ -86,8 +86,10 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
DISALLOW_COPY_AND_ASSIGN(DecryptionKey);
};
- // Sets |key| for |key_id|. The AesDecryptor takes the ownership of the |key|.
- void SetKey(const std::string& key_id, scoped_ptr<DecryptionKey> key);
+ // Creates a DecryptionKey using |key_string| and associates it with |key_id|.
+ // Returns true if successful.
+ bool AddDecryptionKey(const std::string& key_id,
+ const std::string& key_string);
// Gets a DecryptionKey associated with |key_id|. The AesDecryptor still owns
// the key. Returns NULL if no key is associated with |key_id|.
diff --git a/chromium/media/cdm/aes_decryptor_unittest.cc b/chromium/media/cdm/aes_decryptor_unittest.cc
index 1edb8e82220..a4b865c4690 100644
--- a/chromium/media/cdm/aes_decryptor_unittest.cc
+++ b/chromium/media/cdm/aes_decryptor_unittest.cc
@@ -16,124 +16,79 @@
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
-using ::testing::ElementsAreArray;
using ::testing::Gt;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SaveArg;
-using ::testing::StrEq;
using ::testing::StrNe;
MATCHER(IsEmpty, "") { return arg.empty(); }
namespace media {
-// |encrypted_data| is encrypted from |plain_text| using |key|. |key_id| is
-// used to distinguish |key|.
-struct WebmEncryptedData {
- uint8 plain_text[32];
- int plain_text_size;
- uint8 key_id[32];
- int key_id_size;
- uint8 key[32];
- int key_size;
- uint8 encrypted_data[64];
- int encrypted_data_size;
-};
-
static const char kClearKeySystem[] = "org.w3.clearkey";
-// Frames 0 & 1 are encrypted with the same key. Frame 2 is encrypted with a
-// different key. Frame 3 is unencrypted.
-const WebmEncryptedData kWebmEncryptedFrames[] = {
- {
- // plaintext
- "Original data.", 14,
- // key_id
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13
- }, 20,
- // key
- { 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
- }, 16,
- // encrypted_data
- { 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xf0, 0xd1, 0x12, 0xd5, 0x24, 0x81, 0x96,
- 0x55, 0x1b, 0x68, 0x9f, 0x38, 0x91, 0x85
- }, 23
- }, {
- // plaintext
- "Changed Original data.", 22,
- // key_id
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13
- }, 20,
- // key
- { 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
- }, 16,
- // encrypted_data
- { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x57, 0x66, 0xf4, 0x12, 0x1a, 0xed, 0xb5,
- 0x79, 0x1c, 0x8e, 0x25, 0xd7, 0x17, 0xe7, 0x5e,
- 0x16, 0xe3, 0x40, 0x08, 0x27, 0x11, 0xe9
- }, 31
- }, {
- // plaintext
- "Original data.", 14,
- // key_id
- { 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f, 0x30
- }, 13,
- // key
- { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
- 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40
- }, 16,
- // encrypted_data
- { 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x9c, 0x71, 0x26, 0x57, 0x3e, 0x25, 0x37,
- 0xf7, 0x31, 0x81, 0x19, 0x64, 0xce, 0xbc
- }, 23
- }, {
- // plaintext
- "Changed Original data.", 22,
- // key_id
- { 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f, 0x30
- }, 13,
- // key
- { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
- 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40
- }, 16,
- // encrypted_data
- { 0x00, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64,
- 0x20, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61,
- 0x6c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e
- }, 23
- }
-};
-
-static const uint8 kWebmWrongSizedKey[] = { 0x20, 0x20 };
-
-static const uint8 kSubsampleOriginalData[] = "Original subsample data.";
-static const int kSubsampleOriginalDataSize = 24;
+static const uint8 kOriginalData[] = "Original subsample data.";
+static const int kOriginalDataSize = 24;
-static const uint8 kSubsampleKeyId[] = { 0x00, 0x01, 0x02, 0x03 };
+static const uint8 kKeyId[] = {
+ // base64 equivalent is AAECAw==
+ 0x00, 0x01, 0x02, 0x03
+};
-static const uint8 kSubsampleKey[] = {
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13
+static const uint8 kKey[] = {
+ // base64 equivalent is BAUGBwgJCgsMDQ4PEBESEw==
+ 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13
};
-static const uint8 kSubsampleIv[] = {
+static const char kKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+
+static const char kWrongKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"7u7u7u7u7u7u7u7u7u7u7g==\""
+ " }"
+ " ]"
+ "}";
+
+static const char kWrongSizedKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"AAECAw==\""
+ " }"
+ " ]"
+ "}";
+
+static const uint8 kIv[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv using
+// kOriginalData encrypted with kKey and kIv but without any subsamples (or
+// equivalently using kSubsampleEntriesCypherOnly).
+static const uint8 kEncryptedData[] = {
+ 0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
+ 0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
+ 0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
+};
+
+// kOriginalData encrypted with kKey and kIv using
// kSubsampleEntriesNormal.
static const uint8 kSubsampleEncryptedData[] = {
0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
@@ -141,25 +96,42 @@ static const uint8 kSubsampleEncryptedData[] = {
0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
};
-// kSubsampleEncryptedData with 8 bytes padding at the beginning.
-static const uint8 kPaddedSubsampleEncryptedData[] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
- 0x70, 0x73, 0x75, 0x62, 0x09, 0xbb, 0x83, 0x1d,
- 0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
+static const uint8 kOriginalData2[] = "Changed Original data.";
+
+static const uint8 kIv2[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-// kSubsampleOriginalData encrypted with kSubsampleKey and kSubsampleIv but
-// without any subsamples (or equivalently using kSubsampleEntriesCypherOnly).
-static const uint8 kEncryptedData[] = {
- 0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
- 0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
- 0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
+static const uint8 kKeyId2[] = {
+ // base64 equivalent is AAECAwQFBgcICQoLDA0ODxAREhM=
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13
+};
+
+static const char kKey2AsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " }"
+ " ]"
+ "}";
+
+// 'k' in bytes is x14x15x16x17x18x19x1ax1bx1cx1dx1ex1fx20x21x22x23
+
+static const uint8 kEncryptedData2[] = {
+ 0x57, 0x66, 0xf4, 0x12, 0x1a, 0xed, 0xb5, 0x79,
+ 0x1c, 0x8e, 0x25, 0xd7, 0x17, 0xe7, 0x5e, 0x16,
+ 0xe3, 0x40, 0x08, 0x27, 0x11, 0xe9
};
// Subsample entries for testing. The sum of |cypher_bytes| and |clear_bytes| of
-// all entries must be equal to kSubsampleOriginalDataSize to make the subsample
-// entries valid.
+// all entries must be equal to kOriginalDataSize to make the subsample entries
+// valid.
static const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 2, 7 },
@@ -167,6 +139,18 @@ static const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 1, 0 }
};
+static const SubsampleEntry kSubsampleEntriesWrongSize[] = {
+ { 3, 6 }, // This entry doesn't match the correct entry.
+ { 3, 11 },
+ { 1, 0 }
+};
+
+static const SubsampleEntry kSubsampleEntriesInvalidTotalSize[] = {
+ { 1, 1000 }, // This entry is too large.
+ { 3, 11 },
+ { 1, 0 }
+};
+
static const SubsampleEntry kSubsampleEntriesClearOnly[] = {
{ 7, 0 },
{ 8, 0 },
@@ -179,74 +163,24 @@ static const SubsampleEntry kSubsampleEntriesCypherOnly[] = {
{ 0, 10 }
};
-// Generates a 16 byte CTR counter block. The CTR counter block format is a
-// CTR IV appended with a CTR block counter. |iv| is an 8 byte CTR IV.
-// |iv_size| is the size of |iv| in bytes. Returns a string of
-// kDecryptionKeySize bytes.
-static std::string GenerateCounterBlock(const uint8* iv, int iv_size) {
- CHECK_GT(iv_size, 0);
- CHECK_LE(iv_size, DecryptConfig::kDecryptionKeySize);
-
- std::string counter_block(reinterpret_cast<const char*>(iv), iv_size);
- counter_block.append(DecryptConfig::kDecryptionKeySize - iv_size, 0);
- return counter_block;
-}
-
-// Creates a WebM encrypted buffer that the demuxer would pass to the
-// decryptor. |data| is the payload of a WebM encrypted Block. |key_id| is
-// initialization data from the WebM file. Every encrypted Block has
-// a signal byte prepended to a frame. If the frame is encrypted then an IV is
-// prepended to the Block. Current encrypted WebM request for comments
-// specification is here
-// http://wiki.webmproject.org/encryption/webm-encryption-rfc
-static scoped_refptr<DecoderBuffer> CreateWebMEncryptedBuffer(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size) {
- scoped_refptr<DecoderBuffer> encrypted_buffer = DecoderBuffer::CopyFrom(
- data, data_size);
- CHECK(encrypted_buffer.get());
- DCHECK_EQ(kWebMSignalByteSize, 1);
-
- uint8 signal_byte = data[0];
- int data_offset = kWebMSignalByteSize;
-
- // Setting the DecryptConfig object of the buffer while leaving the
- // initialization vector empty will tell the decryptor that the frame is
- // unencrypted.
- std::string counter_block_str;
-
- if (signal_byte & kWebMFlagEncryptedFrame) {
- counter_block_str = GenerateCounterBlock(data + data_offset, kWebMIvSize);
- data_offset += kWebMIvSize;
- }
-
- encrypted_buffer->set_decrypt_config(
- scoped_ptr<DecryptConfig>(new DecryptConfig(
- std::string(reinterpret_cast<const char*>(key_id), key_id_size),
- counter_block_str,
- data_offset,
- std::vector<SubsampleEntry>())));
- return encrypted_buffer;
-}
-
-// TODO(xhwang): Refactor this function to encapsulate more details about
-// creating an encrypted DecoderBuffer with subsamples so we don't have so much
-// boilerplate code in each test before calling this function.
-static scoped_refptr<DecoderBuffer> CreateSubsampleEncryptedBuffer(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size,
- const uint8* iv, int iv_size,
- int data_offset,
+static scoped_refptr<DecoderBuffer> CreateEncryptedBuffer(
+ const std::vector<uint8>& data,
+ const std::vector<uint8>& key_id,
+ const std::vector<uint8>& iv,
+ int offset,
const std::vector<SubsampleEntry>& subsample_entries) {
- scoped_refptr<DecoderBuffer> encrypted_buffer =
- DecoderBuffer::CopyFrom(data, data_size);
+ DCHECK(!data.empty());
+ int padded_size = offset + data.size();
+ scoped_refptr<DecoderBuffer> encrypted_buffer(new DecoderBuffer(padded_size));
+ memcpy(encrypted_buffer->writable_data() + offset, &data[0], data.size());
CHECK(encrypted_buffer.get());
- encrypted_buffer->set_decrypt_config(
- scoped_ptr<DecryptConfig>(new DecryptConfig(
- std::string(reinterpret_cast<const char*>(key_id), key_id_size),
- std::string(reinterpret_cast<const char*>(iv), iv_size),
- data_offset,
- subsample_entries)));
+ std::string key_id_string(
+ reinterpret_cast<const char*>(key_id.empty() ? NULL : &key_id[0]),
+ key_id.size());
+ std::string iv_string(
+ reinterpret_cast<const char*>(iv.empty() ? NULL : &iv[0]), iv.size());
+ encrypted_buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(
+ new DecryptConfig(key_id_string, iv_string, offset, subsample_entries)));
return encrypted_buffer;
}
@@ -259,78 +193,116 @@ class AesDecryptorTest : public testing::Test {
base::Bind(&AesDecryptorTest::KeyMessage, base::Unretained(this))),
decrypt_cb_(base::Bind(&AesDecryptorTest::BufferDecrypted,
base::Unretained(this))),
- subsample_entries_normal_(
+ original_data_(kOriginalData, kOriginalData + kOriginalDataSize),
+ encrypted_data_(kEncryptedData,
+ kEncryptedData + arraysize(kEncryptedData)),
+ subsample_encrypted_data_(
+ kSubsampleEncryptedData,
+ kSubsampleEncryptedData + arraysize(kSubsampleEncryptedData)),
+ key_id_(kKeyId, kKeyId + arraysize(kKeyId)),
+ iv_(kIv, kIv + arraysize(kIv)),
+ normal_subsample_entries_(
kSubsampleEntriesNormal,
kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {
}
protected:
- void GenerateKeyRequest(const uint8* key_id, int key_id_size) {
- EXPECT_CALL(*this, KeyMessage(
- StrNe(std::string()), ElementsAreArray(key_id, key_id_size), ""))
+ void GenerateKeyRequest(const std::vector<uint8>& key_id) {
+ DCHECK(!key_id.empty());
+ EXPECT_CALL(*this, KeyMessage(StrNe(std::string()), key_id, ""))
.WillOnce(SaveArg<0>(&session_id_string_));
EXPECT_TRUE(decryptor_.GenerateKeyRequest(
- std::string(), key_id, key_id_size));
+ std::string(), &key_id[0], key_id.size()));
}
- void AddKeyAndExpectToSucceed(const uint8* key_id, int key_id_size,
- const uint8* key, int key_size) {
- EXPECT_CALL(*this, KeyAdded(session_id_string_));
- decryptor_.AddKey(key, key_size, key_id, key_id_size,
+ enum AddKeyExpectation {
+ KEY_ADDED,
+ KEY_ERROR
+ };
+
+ void AddRawKeyAndExpect(const std::vector<uint8>& key_id,
+ const std::vector<uint8>& key,
+ AddKeyExpectation result) {
+ // TODO(jrummell): Remove once raw keys no longer supported.
+ DCHECK(!key_id.empty());
+ DCHECK(!key.empty());
+
+ if (result == KEY_ADDED) {
+ EXPECT_CALL(*this, KeyAdded(session_id_string_));
+ } else if (result == KEY_ERROR) {
+ EXPECT_CALL(*this, KeyError(session_id_string_,
+ MediaKeys::kUnknownError, 0));
+ } else {
+ NOTREACHED();
+ }
+
+ decryptor_.AddKey(&key[0], key.size(), &key_id[0], key_id.size(),
session_id_string_);
}
- void AddKeyAndExpectToFail(const uint8* key_id, int key_id_size,
- const uint8* key, int key_size) {
- EXPECT_CALL(*this, KeyError(session_id_string_,
- MediaKeys::kUnknownError, 0));
- decryptor_.AddKey(key, key_size, key_id, key_id_size, session_id_string_);
+ void AddKeyAndExpect(const std::string& key, AddKeyExpectation result) {
+ DCHECK(!key.empty());
+
+ if (result == KEY_ADDED) {
+ EXPECT_CALL(*this, KeyAdded(session_id_string_));
+ } else if (result == KEY_ERROR) {
+ EXPECT_CALL(*this,
+ KeyError(session_id_string_, MediaKeys::kUnknownError, 0));
+ } else {
+ NOTREACHED();
+ }
+
+ decryptor_.AddKey(reinterpret_cast<const uint8*>(key.c_str()), key.length(),
+ NULL, 0,
+ session_id_string_);
}
MOCK_METHOD2(BufferDecrypted, void(Decryptor::Status,
const scoped_refptr<DecoderBuffer>&));
- void DecryptAndExpectToSucceed(const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
- scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
-
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- ASSERT_EQ(plain_text_size, decrypted->data_size());
- EXPECT_EQ(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
+ enum DecryptExpectation {
+ SUCCESS,
+ DATA_MISMATCH,
+ DATA_AND_SIZE_MISMATCH,
+ DECRYPT_ERROR
+ };
- void DecryptAndExpectDataMismatch(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
+ void DecryptAndExpect(const scoped_refptr<DecoderBuffer>& encrypted,
+ const std::vector<uint8>& plain_text,
+ DecryptExpectation result) {
scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
-
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- ASSERT_EQ(plain_text_size, decrypted->data_size());
- EXPECT_NE(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
- void DecryptAndExpectSizeDataMismatch(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const uint8* plain_text, int plain_text_size) {
- scoped_refptr<DecoderBuffer> decrypted;
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
+ if (result != DECRYPT_ERROR) {
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kSuccess, NotNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ } else {
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kError, IsNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ }
decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
- ASSERT_TRUE(decrypted.get());
- EXPECT_NE(plain_text_size, decrypted->data_size());
- EXPECT_NE(0, memcmp(plain_text, decrypted->data(), plain_text_size));
- }
- void DecryptAndExpectToFail(const scoped_refptr<DecoderBuffer>& encrypted) {
- EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kError, IsNull()));
- decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
+ std::vector<uint8> decrypted_text;
+ if (decrypted && decrypted->data_size()) {
+ decrypted_text.assign(
+ decrypted->data(), decrypted->data() + decrypted->data_size());
+ }
+
+ switch (result) {
+ case SUCCESS:
+ EXPECT_EQ(plain_text, decrypted_text);
+ break;
+ case DATA_MISMATCH:
+ EXPECT_EQ(plain_text.size(), decrypted_text.size());
+ EXPECT_NE(plain_text, decrypted_text);
+ break;
+ case DATA_AND_SIZE_MISMATCH:
+ EXPECT_NE(plain_text.size(), decrypted_text.size());
+ break;
+ case DECRYPT_ERROR:
+ EXPECT_TRUE(decrypted_text.empty());
+ break;
+ }
}
MOCK_METHOD1(KeyAdded, void(const std::string&));
@@ -343,7 +315,15 @@ class AesDecryptorTest : public testing::Test {
AesDecryptor decryptor_;
std::string session_id_string_;
AesDecryptor::DecryptCB decrypt_cb_;
- std::vector<SubsampleEntry> subsample_entries_normal_;
+
+ // Constants for testing.
+ const std::vector<uint8> original_data_;
+ const std::vector<uint8> encrypted_data_;
+ const std::vector<uint8> subsample_encrypted_data_;
+ const std::vector<uint8> key_id_;
+ const std::vector<uint8> iv_;
+ const std::vector<SubsampleEntry> normal_subsample_entries_;
+ const std::vector<SubsampleEntry> no_subsample_entries_;
};
TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
@@ -351,314 +331,311 @@ TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
EXPECT_TRUE(decryptor_.GenerateKeyRequest(std::string(), NULL, 0));
}
-TEST_F(AesDecryptorTest, NormalWebMDecryption) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, NormalDecryption) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
-TEST_F(AesDecryptorTest, UnencryptedFrameWebMDecryption) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[3];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, DecryptionWithOffset) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 23, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
+}
+
+TEST_F(AesDecryptorTest, UnencryptedFrame) {
+ // An empty iv string signals that the frame is unencrypted.
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ original_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, WrongKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- // Change the first byte of the key.
- std::vector<uint8> wrong_key(frame.key, frame.key + frame.key_size);
- wrong_key[0]++;
-
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- &wrong_key[0], frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, NoKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data, frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kNoKey, IsNull()));
- decryptor_.Decrypt(Decryptor::kVideo, encrypted_data, decrypt_cb_);
+ decryptor_.Decrypt(Decryptor::kVideo, encrypted_buffer, decrypt_cb_);
}
TEST_F(AesDecryptorTest, KeyReplacement) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
-
- // Change the first byte of the key.
- std::vector<uint8> wrong_key(frame.key, frame.key + frame.key_size);
- wrong_key[0]++;
-
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- &wrong_key[0], frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+
+ AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
+ encrypted_buffer, original_data_, DATA_MISMATCH));
+
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
}
TEST_F(AesDecryptorTest, WrongSizedKey) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToFail(frame.key_id, frame.key_id_size,
- kWebmWrongSizedKey, arraysize(kWebmWrongSizedKey));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kWrongSizedKeyAsJWK, KEY_ERROR);
+
+ // Repeat for a raw key. Use "-1" to create a wrong sized key.
+ std::vector<uint8> wrong_sized_key(kKey, kKey + arraysize(kKey) - 1);
+ AddRawKeyAndExpect(key_id_, wrong_sized_key, KEY_ERROR);
}
TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(frame.encrypted_data,
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
-
- const WebmEncryptedData& frame2 = kWebmEncryptedFrames[2];
- GenerateKeyRequest(frame2.key_id, frame2.key_id_size);
- AddKeyAndExpectToSucceed(frame2.key_id, frame2.key_id_size,
- frame2.key, frame2.key_size);
-
- const WebmEncryptedData& frame1 = kWebmEncryptedFrames[1];
- scoped_refptr<DecoderBuffer> encrypted_data1 =
- CreateWebMEncryptedBuffer(frame1.encrypted_data,
- frame1.encrypted_data_size,
- frame1.key_id, frame1.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data1,
- frame1.plain_text,
- frame1.plain_text_size));
-
- scoped_refptr<DecoderBuffer> encrypted_data2 =
- CreateWebMEncryptedBuffer(frame2.encrypted_data,
- frame2.encrypted_data_size,
- frame2.key_id, frame2.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data2,
- frame2.plain_text,
- frame2.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 10, no_subsample_entries_);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ AddKeyAndExpect(kKey2AsJWK, KEY_ADDED);
+
+ // The first key is still available after we added a second key.
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ // The second key is also available.
+ encrypted_buffer = CreateEncryptedBuffer(
+ std::vector<uint8>(kEncryptedData2,
+ kEncryptedData2 + arraysize(kEncryptedData2)),
+ std::vector<uint8>(kKeyId2, kKeyId2 + arraysize(kKeyId2)),
+ std::vector<uint8>(kIv2, kIv2 + arraysize(kIv2)),
+ 30,
+ no_subsample_entries_);
+ ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
+ encrypted_buffer,
+ std::vector<uint8>(kOriginalData2,
+ kOriginalData2 + arraysize(kOriginalData2) - 1),
+ SUCCESS));
}
TEST_F(AesDecryptorTest, CorruptedIv) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change byte 13 to modify the IV. Bytes 13-20 of WebM encrypted data
- // contains the IV.
- std::vector<uint8> frame_with_bad_iv(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_bad_iv[1]++;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_bad_iv[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<uint8> bad_iv = iv_;
+ bad_iv[1]++;
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, bad_iv, 0, no_subsample_entries_);
+
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, CorruptedData) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change last byte to modify the data. Bytes 21+ of WebM encrypted data
- // contains the encrypted frame.
- std::vector<uint8> frame_with_bad_vp8_data(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_bad_vp8_data[frame.encrypted_data_size - 1]++;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_bad_vp8_data[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
-}
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
-TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[0];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change signal byte from an encrypted frame to an unencrypted frame. Byte
- // 12 of WebM encrypted data contains the signal byte.
- std::vector<uint8> frame_with_wrong_signal_byte(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_wrong_signal_byte[0] = 0;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_wrong_signal_byte[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(
- DecryptAndExpectSizeDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+ std::vector<uint8> bad_data = encrypted_data_;
+ bad_data[1]++;
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ bad_data, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
-TEST_F(AesDecryptorTest, UnencryptedAsEncryptedFailure) {
- const WebmEncryptedData& frame = kWebmEncryptedFrames[3];
- GenerateKeyRequest(frame.key_id, frame.key_id_size);
- AddKeyAndExpectToSucceed(frame.key_id, frame.key_id_size,
- frame.key, frame.key_size);
-
- // Change signal byte from an unencrypted frame to an encrypted frame. Byte
- // 0 of WebM encrypted data contains the signal byte.
- std::vector<uint8> frame_with_wrong_signal_byte(
- frame.encrypted_data, frame.encrypted_data + frame.encrypted_data_size);
- frame_with_wrong_signal_byte[0] = kWebMFlagEncryptedFrame;
-
- scoped_refptr<DecoderBuffer> encrypted_data =
- CreateWebMEncryptedBuffer(&frame_with_wrong_signal_byte[0],
- frame.encrypted_data_size,
- frame.key_id, frame.key_id_size);
- ASSERT_NO_FATAL_FAILURE(
- DecryptAndExpectSizeDataMismatch(encrypted_data,
- frame.plain_text,
- frame.plain_text_size));
+TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, SubsampleDecryption) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_normal_);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0, normal_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
// Ensures noninterference of data offset and subsample mechanisms. We never
// expect to encounter this in the wild, but since the DecryptConfig doesn't
// disallow such a configuration, it should be covered.
TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kPaddedSubsampleEncryptedData, arraysize(kPaddedSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- arraysize(kPaddedSubsampleEncryptedData)
- - arraysize(kSubsampleEncryptedData),
- subsample_entries_normal_);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 23, normal_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
-// No subsample or offset.
-TEST_F(AesDecryptorTest, NormalDecryption) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kEncryptedData, arraysize(kEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- std::vector<SubsampleEntry>());
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(
- encrypted_data, kSubsampleOriginalData, kSubsampleOriginalDataSize));
+TEST_F(AesDecryptorTest, SubsampleWrongSize) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> subsample_entries_wrong_size(
+ kSubsampleEntriesWrongSize,
+ kSubsampleEntriesWrongSize + arraysize(kSubsampleEntriesWrongSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0, subsample_entries_wrong_size);
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
-TEST_F(AesDecryptorTest, IncorrectSubsampleSize) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> entries = subsample_entries_normal_;
- entries[2].cypher_bytes += 1;
-
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleEncryptedData, arraysize(kSubsampleEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- entries);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToFail(encrypted_data));
+TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
+ kSubsampleEntriesInvalidTotalSize,
+ kSubsampleEntriesInvalidTotalSize +
+ arraysize(kSubsampleEntriesInvalidTotalSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ subsample_encrypted_data_, key_id_, iv_, 0,
+ subsample_entries_invalid_total_size);
+ DecryptAndExpect(encrypted_buffer, original_data_, DECRYPT_ERROR);
}
// No cypher bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> subsample_entries_clear_only(
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
kSubsampleEntriesClearOnly + arraysize(kSubsampleEntriesClearOnly));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kSubsampleOriginalData, kSubsampleOriginalDataSize,
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_clear_only);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- kSubsampleOriginalData, kSubsampleOriginalDataSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ original_data_, key_id_, iv_, 0, clear_only_subsample_entries);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
// No clear bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
- GenerateKeyRequest(kSubsampleKeyId, arraysize(kSubsampleKeyId));
- AddKeyAndExpectToSucceed(kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleKey, arraysize(kSubsampleKey));
- std::vector<SubsampleEntry> subsample_entries_cypher_only(
+ GenerateKeyRequest(key_id_);
+ AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+
+ std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
kSubsampleEntriesCypherOnly + arraysize(kSubsampleEntriesCypherOnly));
- scoped_refptr<DecoderBuffer> encrypted_data = CreateSubsampleEncryptedBuffer(
- kEncryptedData, arraysize(kEncryptedData),
- kSubsampleKeyId, arraysize(kSubsampleKeyId),
- kSubsampleIv, arraysize(kSubsampleIv),
- 0,
- subsample_entries_cypher_only);
- ASSERT_NO_FATAL_FAILURE(DecryptAndExpectToSucceed(encrypted_data,
- kSubsampleOriginalData, kSubsampleOriginalDataSize));
+
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, cypher_only_subsample_entries);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
+}
+
+TEST_F(AesDecryptorTest, JWKKey) {
+ // Try a simple JWK key (i.e. not in a set)
+ const std::string key1 =
+ "{"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ "}";
+ AddKeyAndExpect(key1, KEY_ERROR);
+
+ // Try a key list with multiple entries.
+ const std::string key2 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " },"
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA==\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key2, KEY_ADDED);
+
+ // Try a key with no spaces and some \n plus additional fields.
+ const std::string key3 =
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM=\",\"k\":\"GawgguFyGrWKav7AX4VKUg="
+ "=\",\"foo\":\"bar\"}]}\n\n";
+ AddKeyAndExpect(key3, KEY_ADDED);
+
+ // Try some non-ASCII characters.
+ AddKeyAndExpect("This is not ASCII due to \xff\xfe\xfd in it.", KEY_ERROR);
+
+ // Try a badly formatted key. Assume that the JSON parser is fully tested,
+ // so we won't try a lot of combinations. However, need a test to ensure
+ // that the code doesn't crash if invalid JSON received.
+ AddKeyAndExpect("This is not a JSON key.", KEY_ERROR);
+
+ // Try passing some valid JSON that is not a dictionary at the top level.
+ AddKeyAndExpect("40", KEY_ERROR);
+
+ // Try an empty dictionary.
+ AddKeyAndExpect("{ }", KEY_ERROR);
+
+ // Try an empty 'keys' dictionary.
+ AddKeyAndExpect("{ \"keys\": [] }", KEY_ERROR);
+
+ // Try with 'keys' not a dictionary.
+ AddKeyAndExpect("{ \"keys\":\"1\" }", KEY_ERROR);
+
+ // Try with 'keys' a list of integers.
+ AddKeyAndExpect("{ \"keys\": [ 1, 2, 3 ] }", KEY_ERROR);
+
+ // Try a key missing padding(=) at end of base64 string.
+ const std::string key4 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key4, KEY_ERROR);
+
+ // Try a key ID missing padding(=) at end of base64 string.
+ const std::string key5 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key5, KEY_ERROR);
+
+ // Try a key with invalid base64 encoding.
+ const std::string key6 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"!@#$%^&*()==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ AddKeyAndExpect(key6, KEY_ERROR);
+}
+
+TEST_F(AesDecryptorTest, RawKey) {
+ // Verify that v0.1b keys (raw key) is still supported. Raw keys are
+ // 16 bytes long. Use the undecoded value of |kKey|.
+ GenerateKeyRequest(key_id_);
+ AddRawKeyAndExpect(
+ key_id_, std::vector<uint8>(kKey, kKey + arraysize(kKey)), KEY_ADDED);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_wrapper.cc b/chromium/media/cdm/ppapi/cdm_wrapper.cc
index 6348f1954d0..66ae43d4d65 100644
--- a/chromium/media/cdm/ppapi/cdm_wrapper.cc
+++ b/chromium/media/cdm/ppapi/cdm_wrapper.cc
@@ -498,8 +498,9 @@ class CdmWrapper : public pp::Instance,
// PPP_ContentDecryptor_Private implementation.
// Note: Results of calls to these methods must be reported through the
// PPB_ContentDecryptor_Private interface.
- virtual void GenerateKeyRequest(const std::string& key_system,
- const std::string& type,
+ virtual void Initialize(const std::string& key_system,
+ bool can_challenge_platform) OVERRIDE;
+ virtual void GenerateKeyRequest(const std::string& type,
pp::VarArrayBuffer init_data) OVERRIDE;
virtual void AddKey(const std::string& session_id,
pp::VarArrayBuffer key,
@@ -636,11 +637,25 @@ bool CdmWrapper::CreateCdmInstance(const std::string& key_system) {
return (cdm_ != NULL);
}
-void CdmWrapper::GenerateKeyRequest(const std::string& key_system,
- const std::string& type,
- pp::VarArrayBuffer init_data) {
+void CdmWrapper::Initialize(const std::string& key_system,
+ bool can_challenge_platform) {
PP_DCHECK(!key_system.empty());
- PP_DCHECK(key_system_.empty() || key_system_ == key_system);
+ PP_DCHECK(key_system_.empty() || (key_system_ == key_system && cdm_));
+
+ if (!cdm_) {
+ if (!CreateCdmInstance(key_system)) {
+ // TODO(jrummell): Is UnknownKeyError the correct response?
+ SendUnknownKeyError(key_system, std::string());
+ return;
+ }
+ }
+ PP_DCHECK(cdm_);
+ key_system_ = key_system;
+}
+
+void CdmWrapper::GenerateKeyRequest(const std::string& type,
+ pp::VarArrayBuffer init_data) {
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
#if defined(CHECK_DOCUMENT_URL)
PP_URLComponents_Dev url_components = {};
@@ -652,36 +667,19 @@ void CdmWrapper::GenerateKeyRequest(const std::string& key_system,
PP_DCHECK(0 < url_components.host.len);
#endif // defined(CHECK_DOCUMENT_URL)
- if (!cdm_) {
- if (!CreateCdmInstance(key_system)) {
- SendUnknownKeyError(key_system, std::string());
- return;
- }
- }
- PP_DCHECK(cdm_);
-
- // Must be set here in case the CDM synchronously calls a cdm::Host method.
- // Clear below on error.
- // TODO(ddorwin): Set/clear key_system_ & cdm_ at same time; clear both on
- // error below.
- key_system_ = key_system;
cdm::Status status = cdm_->GenerateKeyRequest(
type.data(), type.size(),
static_cast<const uint8_t*>(init_data.Map()),
init_data.ByteLength());
PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess) {
- key_system_.clear(); // See comment above.
- return;
- }
-
- key_system_ = key_system;
+ if (status != cdm::kSuccess)
+ SendUnknownKeyError(key_system_, std::string());
}
void CdmWrapper::AddKey(const std::string& session_id,
pp::VarArrayBuffer key,
pp::VarArrayBuffer init_data) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
if (!cdm_) {
SendUnknownKeyError(key_system_, session_id);
return;
@@ -711,7 +709,7 @@ void CdmWrapper::AddKey(const std::string& session_id,
}
void CdmWrapper::CancelKeyRequest(const std::string& session_id) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
if (!cdm_) {
SendUnknownKeyError(key_system_, session_id);
return;
@@ -729,7 +727,7 @@ void CdmWrapper::CancelKeyRequest(const std::string& session_id) {
void CdmWrapper::Decrypt(pp::Buffer_Dev encrypted_buffer,
const PP_EncryptedBlockInfo& encrypted_block_info) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
PP_DCHECK(!encrypted_buffer.is_null());
// Release a buffer that the caller indicated it is finished with.
@@ -759,7 +757,7 @@ void CdmWrapper::Decrypt(pp::Buffer_Dev encrypted_buffer,
void CdmWrapper::InitializeAudioDecoder(
const PP_AudioDecoderConfig& decoder_config,
pp::Buffer_Dev extra_data_buffer) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
cdm::Status status = cdm::kSessionError;
if (cdm_) {
@@ -786,7 +784,7 @@ void CdmWrapper::InitializeAudioDecoder(
void CdmWrapper::InitializeVideoDecoder(
const PP_VideoDecoderConfig& decoder_config,
pp::Buffer_Dev extra_data_buffer) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
cdm::Status status = cdm::kSessionError;
if (cdm_) {
@@ -815,7 +813,7 @@ void CdmWrapper::InitializeVideoDecoder(
void CdmWrapper::DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
uint32_t request_id) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
if (cdm_) {
cdm_->DeinitializeDecoder(
PpDecryptorStreamTypeToCdmStreamType(decoder_type));
@@ -829,7 +827,7 @@ void CdmWrapper::DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
void CdmWrapper::ResetDecoder(PP_DecryptorStreamType decoder_type,
uint32_t request_id) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
if (cdm_)
cdm_->ResetDecoder(PpDecryptorStreamTypeToCdmStreamType(decoder_type));
@@ -842,7 +840,7 @@ void CdmWrapper::DecryptAndDecode(
PP_DecryptorStreamType decoder_type,
pp::Buffer_Dev encrypted_buffer,
const PP_EncryptedBlockInfo& encrypted_block_info) {
- PP_DCHECK(cdm_); // GenerateKeyRequest() should have succeeded.
+ PP_DCHECK(cdm_); // Initialize() should have succeeded.
// Release a buffer that the caller indicated it is finished with.
allocator_.Release(encrypted_block_info.tracking_info.buffer_id);
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 9693bbb4de2..72b31252f85 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -291,7 +291,9 @@ static void AVCodecContextToAudioDecoderConfig(
codec_context->extradata,
codec_context->extradata_size,
is_encrypted,
- record_stats);
+ record_stats,
+ base::TimeDelta(),
+ base::TimeDelta());
if (codec != kCodecOpus) {
DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
config->bits_per_channel());
diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
index 99e1cc246ec..ccd2aa59756 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.h
+++ b/chromium/media/ffmpeg/ffmpeg_common.h
@@ -95,7 +95,8 @@ ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
int channels);
// Converts FFmpeg's audio sample format to Chrome's SampleFormat.
-SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
+MEDIA_EXPORT SampleFormat
+ AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
// Converts FFmpeg's pixel formats to its corresponding supported video format.
VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format);
diff --git a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
index 33ad46ed109..2fa61ace486 100644
--- a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -79,4 +79,22 @@ TEST_F(FFmpegCommonTest, TimeBaseConversions) {
}
}
+TEST_F(FFmpegCommonTest, VerifyFormatSizes) {
+ for (AVSampleFormat format = AV_SAMPLE_FMT_NONE;
+ format < AV_SAMPLE_FMT_NB;
+ format = static_cast<AVSampleFormat>(format + 1)) {
+ SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
+ if (sample_format == kUnknownSampleFormat) {
+ // This format not supported, so skip it.
+ continue;
+ }
+
+ // Have FFMpeg compute the size of a buffer of 1 channel / 1 frame
+ // with 1 byte alignment to make sure the sizes match.
+ int single_buffer_size = av_samples_get_buffer_size(NULL, 1, 1, format, 1);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ EXPECT_EQ(bytes_per_channel, single_buffer_size);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
index 19f00f81aef..0b68fd0896c 100644
--- a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
+++ b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
@@ -152,6 +152,11 @@ FFMPEG_TEST_CASE(Cr234630b, "security/234630b.mov", PIPELINE_ERROR_DECODE,
FFMPEG_TEST_CASE(Cr242786, "security/242786.webm", PIPELINE_OK,
PIPELINE_OK, kNullVideoHash,
"-1.72,-0.83,0.84,1.70,1.23,-0.53,");
+// Test for out-of-bounds access with slightly corrupt file (detection logic
+// thinks it's a MONO file, but actually contains STEREO audio).
+FFMPEG_TEST_CASE(Cr275590, "security/275590.m4a",
+ DECODER_ERROR_NOT_SUPPORTED, DEMUXER_ERROR_COULD_NOT_OPEN,
+ kNullVideoHash, kNullAudioHash);
// General MP4 test cases.
FFMPEG_TEST_CASE(MP4_0, "security/aac.10419.mp4", DEMUXER_ERROR_COULD_NOT_OPEN,
@@ -359,7 +364,7 @@ FLAKY_FFMPEG_TEST_CASE(WEBM_2, "security/uninitialize.webm");
TEST_P(FFmpegRegressionTest, BasicPlayback) {
if (GetParam().init_status == PIPELINE_OK) {
ASSERT_TRUE(Start(GetTestDataFilePath(GetParam().filename),
- GetParam().init_status, true));
+ GetParam().init_status, kHashed));
Play();
ASSERT_EQ(WaitUntilEndedOrError(), GetParam().end_status);
EXPECT_EQ(GetParam().video_md5, GetVideoHash());
@@ -374,7 +379,7 @@ TEST_P(FFmpegRegressionTest, BasicPlayback) {
}
} else {
ASSERT_FALSE(Start(GetTestDataFilePath(GetParam().filename),
- GetParam().init_status, true));
+ GetParam().init_status, kHashed));
EXPECT_EQ(GetParam().video_md5, GetVideoHash());
EXPECT_EQ(GetParam().audio_md5, GetAudioHash());
}
diff --git a/chromium/media/ffmpeg/ffmpeg_unittest.cc b/chromium/media/ffmpeg/ffmpeg_unittest.cc
index d774a0605ab..255d2aad47f 100644
--- a/chromium/media/ffmpeg/ffmpeg_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_unittest.cc
@@ -17,9 +17,9 @@
#include "base/files/memory_mapped_file.h"
#include "base/memory/scoped_ptr.h"
#include "base/path_service.h"
-#include "base/perftimer.h"
#include "base/strings/string_util.h"
#include "base/test/perf_test_suite.h"
+#include "base/test/perf_time_logger.h"
#include "media/base/media.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
@@ -424,27 +424,27 @@ FFMPEG_TEST_CASE(counting, ogv);
TEST_P(FFmpegTest, Perf) {
{
- PerfTimeLogger timer("Opening file");
+ base::PerfTimeLogger timer("Opening file");
OpenFile(GetParam());
}
{
- PerfTimeLogger timer("Opening codecs");
+ base::PerfTimeLogger timer("Opening codecs");
OpenCodecs();
}
{
- PerfTimeLogger timer("Reading file");
+ base::PerfTimeLogger timer("Reading file");
ReadRemainingFile();
}
if (has_audio()) {
- PerfTimeLogger timer("Decoding audio");
+ base::PerfTimeLogger timer("Decoding audio");
DecodeRemainingAudio();
}
if (has_video()) {
- PerfTimeLogger timer("Decoding video");
+ base::PerfTimeLogger timer("Decoding video");
DecodeRemainingVideo();
}
{
- PerfTimeLogger timer("Seeking to zero");
+ base::PerfTimeLogger timer("Seeking to zero");
SeekTo(0);
}
}
diff --git a/chromium/media/filters/audio_file_reader_unittest.cc b/chromium/media/filters/audio_file_reader_unittest.cc
index 7ce37ff2720..bf4acd176c2 100644
--- a/chromium/media/filters/audio_file_reader_unittest.cc
+++ b/chromium/media/filters/audio_file_reader_unittest.cc
@@ -110,7 +110,7 @@ TEST_F(AudioFileReaderTest, WaveF32LE) {
base::TimeDelta::FromMicroseconds(288414), 12719, 12719);
}
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
TEST_F(AudioFileReaderTest, MP3) {
RunTest("sfx.mp3", "3.05,2.87,3.00,3.32,3.58,4.08,", 1, 44100,
base::TimeDelta::FromMicroseconds(313470), 13824, 12719);
diff --git a/chromium/media/filters/audio_renderer_algorithm.cc b/chromium/media/filters/audio_renderer_algorithm.cc
index 97f08113841..572e2630a3d 100644
--- a/chromium/media/filters/audio_renderer_algorithm.cc
+++ b/chromium/media/filters/audio_renderer_algorithm.cc
@@ -12,41 +12,78 @@
#include "media/audio/audio_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
+#include "media/filters/wsola_internals.h"
namespace media {
-// The starting size in frames for |audio_buffer_|. Previous usage maintained a
-// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
-// maintain this number of frames.
-static const int kStartingBufferSizeInFrames = 16 * 512;
+
+// Waveform Similarity Overlap-and-add (WSOLA).
+//
+// One WSOLA iteration
+//
+// 1) Extract |target_block_| as input frames at indices
+// [|target_block_index_|, |target_block_index_| + |ola_window_size_|).
+// Note that |target_block_| is the "natural" continuation of the output.
+//
+// 2) Extract |search_block_| as input frames at indices
+// [|search_block_index_|,
+// |search_block_index_| + |num_candidate_blocks_| + |ola_window_size_|).
+//
+// 3) Find a block within the |search_block_| that is most similar
+// to |target_block_|. Let |optimal_index| be the index of such block and
+// write it to |optimal_block_|.
+//
+// 4) Update:
+// |optimal_block_| = |transition_window_| * |target_block_| +
+// (1 - |transition_window_|) * |optimal_block_|.
+//
+// 5) Overlap-and-add |optimal_block_| to the |wsola_output_|.
+//
+// 6) Update:
+// |target_block_| = |optimal_index| + |ola_window_size_| / 2.
+// |output_index_| = |output_index_| + |ola_window_size_| / 2,
+// |search_block_center_offset_| = |output_index_| * |playback_rate_|, and
+// |search_block_index_| = |search_block_center_offset_| -
+// |search_block_center_offset_|.
// The maximum size in frames for the |audio_buffer_|. Arbitrarily determined.
// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound.
static const int kMaxBufferSizeInFrames = 3 * 96000;
-// Duration of audio segments used for crossfading (in seconds).
-static const double kWindowDuration = 0.08;
-
-// Duration of crossfade between audio segments (in seconds).
-static const double kCrossfadeDuration = 0.008;
-
// Max/min supported playback rates for fast/slow audio. Audio outside of these
// ranges are muted.
// Audio at these speeds would sound better under a frequency domain algorithm.
static const float kMinPlaybackRate = 0.5f;
static const float kMaxPlaybackRate = 4.0f;
+// Overlap-and-add window size in milliseconds.
+static const int kOlaWindowSizeMs = 20;
+
+// Size of search interval in milliseconds. The search interval is
+// [-delta delta] around |output_index_| * |playback_rate_|. So the search
+// interval is 2 * delta.
+static const int kWsolaSearchIntervalMs = 30;
+
+// The starting size in frames for |audio_buffer_|. Previous usage maintained a
+// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
+// maintain this number of frames.
+static const int kStartingBufferSizeInFrames = 16 * 512;
+
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
samples_per_second_(0),
playback_rate_(0),
- frames_in_crossfade_(0),
- index_into_window_(0),
- crossfade_frame_number_(0),
muted_(false),
muted_partial_frame_(0),
- window_size_(0),
- capacity_(kStartingBufferSizeInFrames) {
+ capacity_(kStartingBufferSizeInFrames),
+ output_time_(0.0),
+ search_block_center_offset_(0),
+ search_block_index_(0),
+ num_candidate_blocks_(0),
+ target_block_index_(0),
+ ola_window_size_(0),
+ ola_hop_size_(0),
+ num_complete_frames_(0) {
}
AudioRendererAlgorithm::~AudioRendererAlgorithm() {}
@@ -58,16 +95,59 @@ void AudioRendererAlgorithm::Initialize(float initial_playback_rate,
channels_ = params.channels();
samples_per_second_ = params.sample_rate();
SetPlaybackRate(initial_playback_rate);
-
- window_size_ = samples_per_second_ * kWindowDuration;
- frames_in_crossfade_ = samples_per_second_ * kCrossfadeDuration;
- crossfade_buffer_ = AudioBus::Create(channels_, frames_in_crossfade_);
+ num_candidate_blocks_ = (kWsolaSearchIntervalMs * samples_per_second_) / 1000;
+ ola_window_size_ = kOlaWindowSizeMs * samples_per_second_ / 1000;
+
+ // Make sure window size in an even number.
+ ola_window_size_ += ola_window_size_ & 1;
+ ola_hop_size_ = ola_window_size_ / 2;
+
+ // |num_candidate_blocks_| / 2 is the offset of the center of the search
+ // block to the center of the first (left most) candidate block. The offset
+ // of the center of a candidate block to its left most point is
+ // |ola_window_size_| / 2 - 1. Note that |ola_window_size_| is even and in
+ // our convention the center belongs to the left half, so we need to subtract
+ // one frame to get the correct offset.
+ //
+ // Search Block
+ // <------------------------------------------->
+ //
+ // |ola_window_size_| / 2 - 1
+ // <----
+ //
+ // |num_candidate_blocks_| / 2
+ // <----------------
+ // center
+ // X----X----------------X---------------X-----X
+ // <----------> <---------->
+ // Candidate ... Candidate
+ // 1, ... |num_candidate_blocks_|
+ search_block_center_offset_ = num_candidate_blocks_ / 2 +
+ (ola_window_size_ / 2 - 1);
+
+ ola_window_.reset(new float[ola_window_size_]);
+ internal::GetSymmetricHanningWindow(ola_window_size_, ola_window_.get());
+
+ transition_window_.reset(new float[ola_window_size_ * 2]);
+ internal::GetSymmetricHanningWindow(2 * ola_window_size_,
+ transition_window_.get());
+
+ wsola_output_ = AudioBus::Create(channels_, ola_window_size_ + ola_hop_size_);
+ wsola_output_->Zero(); // Initialize for overlap-and-add of the first block.
+
+ // Auxiliary containers.
+ optimal_block_ = AudioBus::Create(channels_, ola_window_size_);
+ search_block_ = AudioBus::Create(
+ channels_, num_candidate_blocks_ + (ola_window_size_ - 1));
+ target_block_ = AudioBus::Create(channels_, ola_window_size_);
}
int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
if (playback_rate_ == 0)
return 0;
+ DCHECK_EQ(channels_, dest->channels());
+
// Optimize the |muted_| case to issue a single clear instead of performing
// the full crossfade and clearing each crossfaded frame.
if (muted_) {
@@ -93,12 +173,12 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
return frames_to_render;
}
- int slower_step = ceil(window_size_ * playback_rate_);
- int faster_step = ceil(window_size_ / playback_rate_);
+ int slower_step = ceil(ola_window_size_ * playback_rate_);
+ int faster_step = ceil(ola_window_size_ / playback_rate_);
// Optimize the most common |playback_rate_| ~= 1 case to use a single copy
// instead of copying frame by frame.
- if (window_size_ <= faster_step && slower_step >= window_size_) {
+ if (ola_window_size_ <= faster_step && slower_step >= ola_window_size_) {
const int frames_to_copy =
std::min(audio_buffer_.frames(), requested_frames);
const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest);
@@ -106,277 +186,201 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
return frames_read;
}
- int total_frames_rendered = 0;
- while (total_frames_rendered < requested_frames) {
- if (index_into_window_ >= window_size_)
- ResetWindow();
-
- int rendered_frames = 0;
- if (window_size_ > faster_step) {
- rendered_frames =
- OutputFasterPlayback(dest,
- total_frames_rendered,
- requested_frames - total_frames_rendered,
- window_size_,
- faster_step);
- } else if (slower_step < window_size_) {
- rendered_frames =
- OutputSlowerPlayback(dest,
- total_frames_rendered,
- requested_frames - total_frames_rendered,
- slower_step,
- window_size_);
- } else {
- NOTREACHED();
- }
-
- if (rendered_frames == 0)
- break;
-
- total_frames_rendered += rendered_frames;
- }
- return total_frames_rendered;
+ int rendered_frames = 0;
+ do {
+ rendered_frames += WriteCompletedFramesTo(
+ requested_frames - rendered_frames, rendered_frames, dest);
+ } while (rendered_frames < requested_frames && RunOneWsolaIteration());
+ return rendered_frames;
}
-void AudioRendererAlgorithm::ResetWindow() {
- DCHECK_LE(index_into_window_, window_size_);
- index_into_window_ = 0;
- crossfade_frame_number_ = 0;
+void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
+ DCHECK_GE(new_rate, 0);
+ playback_rate_ = new_rate;
+ muted_ =
+ playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate;
}
-int AudioRendererAlgorithm::OutputFasterPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step) {
- // Ensure we don't run into OOB read/write situation.
- CHECK_GT(input_step, output_step);
- DCHECK_LT(index_into_window_, window_size_);
- DCHECK_GT(playback_rate_, 1.0);
- DCHECK(!muted_);
-
- if (audio_buffer_.frames() < 1)
- return 0;
-
- // The audio data is output in a series of windows. For sped-up playback,
- // the window is comprised of the following phases:
- //
- // a) Output raw data.
- // b) Save bytes for crossfade in |crossfade_buffer_|.
- // c) Drop data.
- // d) Output crossfaded audio leading up to the next window.
- //
- // The duration of each phase is computed below based on the |window_size_|
- // and |playback_rate_|.
- DCHECK_LE(frames_in_crossfade_, output_step);
-
- // This is the index of the end of phase a, beginning of phase b.
- int outtro_crossfade_begin = output_step - frames_in_crossfade_;
-
- // This is the index of the end of phase b, beginning of phase c.
- int outtro_crossfade_end = output_step;
-
- // This is the index of the end of phase c, beginning of phase d.
- // This phase continues until |index_into_window_| reaches |window_size_|, at
- // which point the window restarts.
- int intro_crossfade_begin = input_step - frames_in_crossfade_;
-
- // a) Output raw frames if we haven't reached the crossfade section.
- if (index_into_window_ < outtro_crossfade_begin) {
- // Read as many frames as we can and return the count. If it's not enough,
- // we will get called again.
- const int frames_to_copy =
- std::min(requested_frames, outtro_crossfade_begin - index_into_window_);
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
- }
-
- // b) Save outtro crossfade frames into intermediate buffer, but do not output
- // anything to |dest|.
- if (index_into_window_ < outtro_crossfade_end) {
- // This phase only applies if there are bytes to crossfade.
- DCHECK_GT(frames_in_crossfade_, 0);
- int crossfade_start = index_into_window_ - outtro_crossfade_begin;
- int crossfade_count = outtro_crossfade_end - index_into_window_;
- int copied = audio_buffer_.ReadFrames(
- crossfade_count, crossfade_start, crossfade_buffer_.get());
- index_into_window_ += copied;
-
- // Did we get all the frames we need? If not, return and let subsequent
- // calls try to get the rest.
- if (copied != crossfade_count)
- return 0;
- }
-
- // c) Drop frames until we reach the intro crossfade section.
- if (index_into_window_ < intro_crossfade_begin) {
- // Check if there is enough data to skip all the frames needed. If not,
- // return 0 and let subsequent calls try to skip it all.
- int seek_frames = intro_crossfade_begin - index_into_window_;
- if (audio_buffer_.frames() < seek_frames)
- return 0;
- audio_buffer_.SeekFrames(seek_frames);
+void AudioRendererAlgorithm::FlushBuffers() {
+ // Clear the queue of decoded packets (releasing the buffers).
+ audio_buffer_.Clear();
+ output_time_ = 0.0;
+ search_block_index_ = 0;
+ target_block_index_ = 0;
+ wsola_output_->Zero();
+ num_complete_frames_ = 0;
+}
- // We've dropped all the frames that need to be dropped.
- index_into_window_ += seek_frames;
- }
+base::TimeDelta AudioRendererAlgorithm::GetTime() {
+ return audio_buffer_.current_time();
+}
- // d) Crossfade and output a frame, as long as we have data.
- if (audio_buffer_.frames() < 1)
- return 0;
- DCHECK_GT(frames_in_crossfade_, 0);
- DCHECK_LT(index_into_window_, window_size_);
-
- int offset_into_buffer = index_into_window_ - intro_crossfade_begin;
- int copied = audio_buffer_.ReadFrames(1, dest_offset, dest);
- DCHECK_EQ(copied, 1);
- CrossfadeFrame(crossfade_buffer_.get(),
- offset_into_buffer,
- dest,
- dest_offset,
- offset_into_buffer);
- index_into_window_ += copied;
- return copied;
+void AudioRendererAlgorithm::EnqueueBuffer(
+ const scoped_refptr<AudioBuffer>& buffer_in) {
+ DCHECK(!buffer_in->end_of_stream());
+ audio_buffer_.Append(buffer_in);
}
-int AudioRendererAlgorithm::OutputSlowerPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step) {
- // Ensure we don't run into OOB read/write situation.
- CHECK_LT(input_step, output_step);
- DCHECK_LT(index_into_window_, window_size_);
- DCHECK_LT(playback_rate_, 1.0);
- DCHECK_NE(playback_rate_, 0);
- DCHECK(!muted_);
-
- if (audio_buffer_.frames() < 1)
- return 0;
+bool AudioRendererAlgorithm::IsQueueFull() {
+ return audio_buffer_.frames() >= capacity_;
+}
- // The audio data is output in a series of windows. For slowed down playback,
- // the window is comprised of the following phases:
- //
- // a) Output raw data.
- // b) Output and save bytes for crossfade in |crossfade_buffer_|.
- // c) Output* raw data.
- // d) Output* crossfaded audio leading up to the next window.
- //
- // * Phases c) and d) do not progress |audio_buffer_|'s cursor so that the
- // |audio_buffer_|'s cursor is in the correct place for the next window.
- //
- // The duration of each phase is computed below based on the |window_size_|
- // and |playback_rate_|.
- DCHECK_LE(frames_in_crossfade_, input_step);
+void AudioRendererAlgorithm::IncreaseQueueCapacity() {
+ capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
+}
- // This is the index of the end of phase a, beginning of phase b.
- int intro_crossfade_begin = input_step - frames_in_crossfade_;
+bool AudioRendererAlgorithm::CanPerformWsola() const {
+ const int search_block_size = num_candidate_blocks_ + (ola_window_size_ - 1);
+ const int frames = audio_buffer_.frames();
+ return target_block_index_ + ola_window_size_ <= frames &&
+ search_block_index_ + search_block_size <= frames;
+}
- // This is the index of the end of phase b, beginning of phase c.
- int intro_crossfade_end = input_step;
+bool AudioRendererAlgorithm::RunOneWsolaIteration() {
+ if (!CanPerformWsola())
+ return false;
- // This is the index of the end of phase c, beginning of phase d.
- // This phase continues until |index_into_window_| reaches |window_size_|, at
- // which point the window restarts.
- int outtro_crossfade_begin = output_step - frames_in_crossfade_;
+ GetOptimalBlock();
- // a) Output raw frames.
- if (index_into_window_ < intro_crossfade_begin) {
- // Read as many frames as we can and return the count. If it's not enough,
- // we will get called again.
- const int frames_to_copy =
- std::min(requested_frames, intro_crossfade_begin - index_into_window_);
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
- }
+ // Overlap-and-add.
+ for (int k = 0; k < channels_; ++k) {
+ const float* const ch_opt_frame = optimal_block_->channel(k);
+ float* ch_output = wsola_output_->channel(k) + num_complete_frames_;
+ for (int n = 0; n < ola_hop_size_; ++n) {
+ ch_output[n] = ch_output[n] * ola_window_[ola_hop_size_ + n] +
+ ch_opt_frame[n] * ola_window_[n];
+ }
- // b) Save the raw frames for the intro crossfade section, then copy the
- // same frames to |dest|.
- if (index_into_window_ < intro_crossfade_end) {
- const int frames_to_copy =
- std::min(requested_frames, intro_crossfade_end - index_into_window_);
- int offset = index_into_window_ - intro_crossfade_begin;
- int copied = audio_buffer_.ReadFrames(
- frames_to_copy, offset, crossfade_buffer_.get());
- crossfade_buffer_->CopyPartialFramesTo(offset, copied, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
+ // Copy the second half to the output.
+ memcpy(&ch_output[ola_hop_size_], &ch_opt_frame[ola_hop_size_],
+ sizeof(*ch_opt_frame) * ola_hop_size_);
}
- // c) Output a raw frame into |dest| without advancing the |audio_buffer_|
- // cursor.
- int audio_buffer_offset = index_into_window_ - intro_crossfade_end;
- DCHECK_GE(audio_buffer_offset, 0);
- if (audio_buffer_.frames() <= audio_buffer_offset)
- return 0;
- int copied =
- audio_buffer_.PeekFrames(1, audio_buffer_offset, dest_offset, dest);
- DCHECK_EQ(1, copied);
-
- // d) Crossfade the next frame of |crossfade_buffer_| into |dest| if we've
- // reached the outtro crossfade section of the window.
- if (index_into_window_ >= outtro_crossfade_begin) {
- int offset_into_crossfade_buffer =
- index_into_window_ - outtro_crossfade_begin;
- CrossfadeFrame(dest,
- dest_offset,
- crossfade_buffer_.get(),
- offset_into_crossfade_buffer,
- offset_into_crossfade_buffer);
- }
+ num_complete_frames_ += ola_hop_size_;
+ UpdateOutputTime(ola_hop_size_);
+ RemoveOldInputFrames();
+ return true;
+}
- index_into_window_ += copied;
- return copied;
+void AudioRendererAlgorithm::UpdateOutputTime(double time_change) {
+ output_time_ += time_change;
+ // Center of the search region, in frames.
+ const int search_block_center_index = static_cast<int>(
+ output_time_ * playback_rate_ + 0.5);
+ search_block_index_ = search_block_center_index - search_block_center_offset_;
}
-void AudioRendererAlgorithm::CrossfadeFrame(AudioBus* intro,
- int intro_offset,
- AudioBus* outtro,
- int outtro_offset,
- int fade_offset) {
- float crossfade_ratio =
- static_cast<float>(fade_offset) / frames_in_crossfade_;
- for (int channel = 0; channel < channels_; ++channel) {
- outtro->channel(channel)[outtro_offset] =
- (1.0f - crossfade_ratio) * intro->channel(channel)[intro_offset] +
- (crossfade_ratio) * outtro->channel(channel)[outtro_offset];
- }
+void AudioRendererAlgorithm::RemoveOldInputFrames() {
+ const int earliest_used_index = std::min(target_block_index_,
+ search_block_index_);
+ if (earliest_used_index <= 0)
+ return; // Nothing to remove.
+
+ // Remove frames from input and adjust indices accordingly.
+ audio_buffer_.SeekFrames(earliest_used_index);
+ target_block_index_ -= earliest_used_index;
+
+ // Adjust output index.
+ double output_time_change = static_cast<double>(earliest_used_index) /
+ playback_rate_;
+ CHECK_GE(output_time_, output_time_change);
+ UpdateOutputTime(-output_time_change);
}
-void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
- DCHECK_GE(new_rate, 0);
- playback_rate_ = new_rate;
- muted_ =
- playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate;
+int AudioRendererAlgorithm::WriteCompletedFramesTo(
+ int requested_frames, int dest_offset, AudioBus* dest) {
+ int rendered_frames = std::min(num_complete_frames_, requested_frames);
- ResetWindow();
-}
+ if (rendered_frames == 0)
+ return 0; // There is nothing to read from |wsola_output_|, return.
-void AudioRendererAlgorithm::FlushBuffers() {
- ResetWindow();
+ wsola_output_->CopyPartialFramesTo(0, rendered_frames, dest_offset, dest);
- // Clear the queue of decoded packets (releasing the buffers).
- audio_buffer_.Clear();
+ // Remove the frames which are read.
+ int frames_to_move = wsola_output_->frames() - rendered_frames;
+ for (int k = 0; k < channels_; ++k) {
+ float* ch = wsola_output_->channel(k);
+ memmove(ch, &ch[rendered_frames], sizeof(*ch) * frames_to_move);
+ }
+ num_complete_frames_ -= rendered_frames;
+ return rendered_frames;
}
-base::TimeDelta AudioRendererAlgorithm::GetTime() {
- return audio_buffer_.current_time();
-}
+bool AudioRendererAlgorithm::TargetIsWithinSearchRegion() const {
+ const int search_block_size = num_candidate_blocks_ + (ola_window_size_ - 1);
-void AudioRendererAlgorithm::EnqueueBuffer(
- const scoped_refptr<AudioBuffer>& buffer_in) {
- DCHECK(!buffer_in->end_of_stream());
- audio_buffer_.Append(buffer_in);
+ return target_block_index_ >= search_block_index_ &&
+ target_block_index_ + ola_window_size_ <=
+ search_block_index_ + search_block_size;
}
-bool AudioRendererAlgorithm::IsQueueFull() {
- return audio_buffer_.frames() >= capacity_;
+void AudioRendererAlgorithm::GetOptimalBlock() {
+ int optimal_index = 0;
+
+ // An interval around last optimal block which is excluded from the search.
+ // This is to reduce the buzzy sound. The number 160 is rather arbitrary and
+ // derived heuristically.
+ const int kExcludeIntervalLengthFrames = 160;
+ if (TargetIsWithinSearchRegion()) {
+ optimal_index = target_block_index_;
+ PeekAudioWithZeroPrepend(optimal_index, optimal_block_.get());
+ } else {
+ PeekAudioWithZeroPrepend(target_block_index_, target_block_.get());
+ PeekAudioWithZeroPrepend(search_block_index_, search_block_.get());
+ int last_optimal = target_block_index_ - ola_hop_size_ -
+ search_block_index_;
+ internal::Interval exclude_iterval = std::make_pair(
+ last_optimal - kExcludeIntervalLengthFrames / 2,
+ last_optimal + kExcludeIntervalLengthFrames / 2);
+
+ // |optimal_index| is in frames and it is relative to the beginning of the
+ // |search_block_|.
+ optimal_index = internal::OptimalIndex(
+ search_block_.get(), target_block_.get(), exclude_iterval);
+
+ // Translate |index| w.r.t. the beginning of |audio_buffer_| and extract the
+ // optimal block.
+ optimal_index += search_block_index_;
+ PeekAudioWithZeroPrepend(optimal_index, optimal_block_.get());
+
+ // Make a transition from target block to the optimal block if different.
+ // Target block has the best continuation to the current output.
+ // Optimal block is the most similar block to the target, however, it might
+ // introduce some discontinuity when over-lap-added. Therefore, we combine
+ // them for a smoother transition. The length of transition window is twice
+ // as that of the optimal-block which makes it like a weighting function
+ // where target-block has higher weight close to zero (weight of 1 at index
+ // 0) and lower weight close the end.
+ for (int k = 0; k < channels_; ++k) {
+ float* ch_opt = optimal_block_->channel(k);
+ const float* const ch_target = target_block_->channel(k);
+ for (int n = 0; n < ola_window_size_; ++n) {
+ ch_opt[n] = ch_opt[n] * transition_window_[n] + ch_target[n] *
+ transition_window_[ola_window_size_ + n];
+ }
+ }
+ }
+
+ // Next target is one hop ahead of the current optimal.
+ target_block_index_ = optimal_index + ola_hop_size_;
}
-void AudioRendererAlgorithm::IncreaseQueueCapacity() {
- capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
+void AudioRendererAlgorithm::PeekAudioWithZeroPrepend(
+ int read_offset_frames, AudioBus* dest) {
+ CHECK_LE(read_offset_frames + dest->frames(), audio_buffer_.frames());
+
+ int write_offset = 0;
+ int num_frames_to_read = dest->frames();
+ if (read_offset_frames < 0) {
+ int num_zero_frames_appended = std::min(-read_offset_frames,
+ num_frames_to_read);
+ read_offset_frames = 0;
+ num_frames_to_read -= num_zero_frames_appended;
+ write_offset = num_zero_frames_appended;
+ dest->ZeroFrames(num_zero_frames_appended);
+ }
+ audio_buffer_.PeekFrames(num_frames_to_read, read_offset_frames,
+ write_offset, dest);
}
} // namespace media
diff --git a/chromium/media/filters/audio_renderer_algorithm.h b/chromium/media/filters/audio_renderer_algorithm.h
index 26790b996ac..39e4db6ca69 100644
--- a/chromium/media/filters/audio_renderer_algorithm.h
+++ b/chromium/media/filters/audio_renderer_algorithm.h
@@ -12,11 +12,15 @@
// This class is *not* thread-safe. Calls to enqueue and retrieve data must be
// locked if called from multiple threads.
//
-// AudioRendererAlgorithm uses a simple pitch-preservation algorithm to
-// stretch and compress audio data to meet playback speeds less than and
-// greater than the natural playback of the audio stream.
+// AudioRendererAlgorithm uses the Waveform Similarity Overlap and Add (WSOLA)
+// algorithm to stretch or compress audio data to meet playback speeds less than
+// or greater than the natural playback of the audio stream. The algorithm
+// preserves local properties of the audio; therefore, pitch and harmonics
+// are preserved. See audio_renderer_algorithm.cc for a more elaborate
+// description of the algorithm.
//
// Audio at very low or very high playback rates are muted to preserve quality.
+//
#ifndef MEDIA_FILTERS_AUDIO_RENDERER_ALGORITHM_H_
#define MEDIA_FILTERS_AUDIO_RENDERER_ALGORITHM_H_
@@ -84,46 +88,45 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
bool is_muted() { return muted_; }
private:
- // Fills |dest| with up to |requested_frames| frames of audio data at faster
- // than normal speed. Returns the number of frames inserted into |dest|. If
- // not enough data available, returns 0.
- //
- // When the audio playback is > 1.0, we use a variant of Overlap-Add to squish
- // audio output while preserving pitch. Essentially, we play a bit of audio
- // data at normal speed, then we "fast forward" by dropping the next bit of
- // audio data, and then we stich the pieces together by crossfading from one
- // audio chunk to the next.
- int OutputFasterPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step);
-
- // Fills |dest| with up to |requested_frames| frames of audio data at slower
- // than normal speed. Returns the number of frames inserted into |dest|. If
- // not enough data available, returns 0.
- //
- // When the audio playback is < 1.0, we use a variant of Overlap-Add to
- // stretch audio output while preserving pitch. This works by outputting a
- // segment of audio data at normal speed. The next audio segment then starts
- // by repeating some of the audio data from the previous audio segment.
- // Segments are stiched together by crossfading from one audio chunk to the
- // next.
- int OutputSlowerPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step);
-
- // Resets the window state to the start of a new window.
- void ResetWindow();
-
- // Does a linear crossfade from |intro| into |outtro| for one frame.
- void CrossfadeFrame(AudioBus* intro,
- int intro_offset,
- AudioBus* outtro,
- int outtro_offset,
- int fade_offset);
+ // Within |search_block_|, find the block of data that is most similar to
+ // |target_block_|, and write it in |optimal_block_|. This method assumes that
+ // there is enough data to perform a search, i.e. |search_block_| and
+ // |target_block_| can be extracted from the available frames.
+ void GetOptimalBlock();
+
+ // Read a maximum of |requested_frames| frames from |wsola_output_|. Returns
+ // number of frames actually read.
+ int WriteCompletedFramesTo(
+ int requested_frames, int output_offset, AudioBus* dest);
+
+ // Fill |dest| with frames from |audio_buffer_| starting from frame
+ // |read_offset_frames|. |dest| is expected to have the same number of
+ // channels as |audio_buffer_|. A negative offset, i.e.
+  // |read_offset_frames| < 0, is accepted assuming that |audio_buffer_| is
+  // zero for negative indices. This might happen for the first few frames.
+  // This method assumes there are enough frames to fill |dest|, i.e.
+  // |read_offset_frames| + |dest->frames()| does not extend into the future.
+ void PeekAudioWithZeroPrepend(int read_offset_frames, AudioBus* dest);
+
+ // Run one iteration of WSOLA, if there are sufficient frames. This will
+ // overlap-and-add one block to |wsola_output_|, hence, |num_complete_frames_|
+ // is incremented by |ola_hop_size_|.
+ bool RunOneWsolaIteration();
+
+ // Seek |audio_buffer_| forward to remove frames from input that are not used
+ // any more. State of the WSOLA will be updated accordingly.
+ void RemoveOldInputFrames();
+
+ // Update |output_time_| by |time_change|. In turn |search_block_index_| is
+ // updated.
+ void UpdateOutputTime(double time_change);
+
+ // Is |target_block_| fully within |search_block_|? If so, we don't need to
+ // perform the search.
+ bool TargetIsWithinSearchRegion() const;
+
+ // Do we have enough data to perform one round of WSOLA?
+ bool CanPerformWsola() const;
// Number of channels in audio stream.
int channels_;
@@ -137,32 +140,79 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Buffered audio data.
AudioBufferQueue audio_buffer_;
- // Length for crossfade in frames.
- int frames_in_crossfade_;
-
- // The current location in the audio window, between 0 and |window_size_|.
- // When |index_into_window_| reaches |window_size_|, the window resets.
- // Indexed by frame.
- int index_into_window_;
-
- // The frame number in the crossfade.
- int crossfade_frame_number_;
-
// True if the audio should be muted.
bool muted_;
// If muted, keep track of partial frames that should have been skipped over.
double muted_partial_frame_;
- // Temporary buffer to hold crossfade data.
- scoped_ptr<AudioBus> crossfade_buffer_;
-
- // Window size, in frames (calculated from audio properties).
- int window_size_;
-
// How many frames to have in the queue before we report the queue is full.
int capacity_;
+  // Bookkeeping of the current time of generated audio, in frames. This
+  // should be appropriately updated when output samples are generated,
+  // regardless of whether we push samples out when FillBuffer() is called or
+  // we store audio in |wsola_output_| for the subsequent calls to
+  // FillBuffer(). Furthermore, if samples from |audio_buffer_| are evicted
+  // then this member variable should be updated based on |playback_rate_|.
+  // Note that this member should be updated ONLY by calling
+  // UpdateOutputTime(), so that |search_block_index_| is updated accordingly.
+ double output_time_;
+
+ // The offset of the center frame of |search_block_| w.r.t. its first frame.
+ int search_block_center_offset_;
+
+ // Index of the beginning of the |search_block_|, in frames.
+ int search_block_index_;
+
+  // Number of blocks to search through to find the most similar one to the
+  // target block.
+ int num_candidate_blocks_;
+
+ // Index of the beginning of the target block, counted in frames.
+ int target_block_index_;
+
+ // Overlap-and-add window size in frames.
+ int ola_window_size_;
+
+ // The hop size of overlap-and-add in frames. This implementation assumes 50%
+ // overlap-and-add.
+ int ola_hop_size_;
+
+  // Number of frames in |wsola_output_| for which overlap-and-add has been
+  // completed and which can be copied to output when FillBuffer() is called.
+  // Also the index at which the next WSOLA window has to overlap-and-add.
+ int num_complete_frames_;
+
+ // This stores a part of the output that is created but couldn't be rendered.
+ // Output is generated frame-by-frame which at some point might exceed the
+ // number of requested samples. Furthermore, due to overlap-and-add,
+ // the last half-window of the output is incomplete, which is stored in this
+ // buffer.
+ scoped_ptr<AudioBus> wsola_output_;
+
+ // Overlap-and-add window.
+ scoped_ptr<float[]> ola_window_;
+
+ // Transition window, used to update |optimal_block_| by a weighted sum of
+ // |optimal_block_| and |target_block_|.
+ scoped_ptr<float[]> transition_window_;
+
+ // Auxiliary variables to avoid allocation in every iteration.
+
+ // Stores the optimal block in every iteration. This is the most
+ // similar block to |target_block_| within |search_block_| and it is
+ // overlap-and-added to |wsola_output_|.
+ scoped_ptr<AudioBus> optimal_block_;
+
+ // A block of data that search is performed over to find the |optimal_block_|.
+ scoped_ptr<AudioBus> search_block_;
+
+ // Stores the target block, denoted as |target| above. |search_block_| is
+ // searched for a block (|optimal_block_|) that is most similar to
+ // |target_block_|.
+ scoped_ptr<AudioBus> target_block_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererAlgorithm);
};
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index d5119c00c2b..649e0588498 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -8,16 +8,20 @@
// correct rate. We always pass in a very large destination buffer with the
// expectation that FillBuffer() will fill as much as it can but no more.
+#include <algorithm> // For std::min().
#include <cmath>
+#include <vector>
#include "base/bind.h"
#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/test_helpers.h"
#include "media/filters/audio_renderer_algorithm.h"
+#include "media/filters/wsola_internals.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -25,6 +29,41 @@ namespace media {
static const int kFrameSize = 250;
static const int kSamplesPerSecond = 3000;
static const SampleFormat kSampleFormat = kSampleFormatS16;
+static const int kOutputDurationInSec = 10;
+
+static void FillWithSquarePulseTrain(
+ int half_pulse_width, int offset, int num_samples, float* data) {
+ ASSERT_GE(offset, 0);
+ ASSERT_LE(offset, num_samples);
+
+ // Fill backward from |offset| - 1 toward zero, starting with -1, alternating
+  // between -1 and 1 every |half_pulse_width| samples.
+ float pulse = -1.0f;
+ for (int n = offset - 1, k = 0; n >= 0; --n, ++k) {
+ if (k >= half_pulse_width) {
+ pulse = -pulse;
+ k = 0;
+ }
+ data[n] = pulse;
+ }
+
+ // Fill forward from |offset| towards the end, starting with 1, alternating
+  // between 1 and -1 every |half_pulse_width| samples.
+ pulse = 1.0f;
+ for (int n = offset, k = 0; n < num_samples; ++n, ++k) {
+ if (k >= half_pulse_width) {
+ pulse = -pulse;
+ k = 0;
+ }
+ data[n] = pulse;
+ }
+}
+
+static void FillWithSquarePulseTrain(
+ int half_pulse_width, int offset, int channel, AudioBus* audio_bus) {
+ FillWithSquarePulseTrain(half_pulse_width, offset, audio_bus->frames(),
+ audio_bus->channel(channel));
+}
class AudioRendererAlgorithmTest : public testing::Test {
public:
@@ -118,7 +157,8 @@ class AudioRendererAlgorithmTest : public testing::Test {
void TestPlaybackRate(double playback_rate) {
const int kDefaultBufferSize = algorithm_.samples_per_second() / 100;
- const int kDefaultFramesRequested = 2 * algorithm_.samples_per_second();
+ const int kDefaultFramesRequested = kOutputDurationInSec *
+ algorithm_.samples_per_second();
TestPlaybackRate(
playback_rate, kDefaultBufferSize, kDefaultFramesRequested);
@@ -141,12 +181,21 @@ class AudioRendererAlgorithmTest : public testing::Test {
}
int frames_remaining = total_frames_requested;
+ bool first_fill_buffer = true;
while (frames_remaining > 0) {
int frames_requested = std::min(buffer_size_in_frames, frames_remaining);
int frames_written = algorithm_.FillBuffer(bus.get(), frames_requested);
ASSERT_GT(frames_written, 0) << "Requested: " << frames_requested
<< ", playing at " << playback_rate;
- CheckFakeData(bus.get(), frames_written);
+
+      // Do not check data if this is the first pull and only one frame is
+      // written. The very first frame out of WSOLA is always zero because of
+      // the overlap-and-add window, which is zero for the first sample. So if
+      // only one frame is written at the very first buffer-fill, it is zero,
+      // which might cause an exception in CheckFakeData().
+ if (!first_fill_buffer || frames_written > 1)
+ CheckFakeData(bus.get(), frames_written);
+ first_fill_buffer = false;
frames_remaining -= frames_written;
FillAlgorithmQueue();
@@ -175,6 +224,79 @@ class AudioRendererAlgorithmTest : public testing::Test {
EXPECT_NEAR(playback_rate, actual_playback_rate, playback_rate / 100.0);
}
+ void WsolaTest(float playback_rate) {
+ const int kSampleRateHz = 48000;
+ const media::ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+ const int kBytesPerSample = 2;
+ const int kNumFrames = kSampleRateHz / 100; // 10 milliseconds.
+
+ channels_ = ChannelLayoutToChannelCount(kChannelLayout);
+ AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
+ kSampleRateHz, kBytesPerSample * 8, kNumFrames);
+ algorithm_.Initialize(playback_rate, params);
+
+ // A pulse is 6 milliseconds (even number of samples).
+ const int kPulseWidthSamples = 6 * kSampleRateHz / 1000;
+ const int kHalfPulseWidthSamples = kPulseWidthSamples / 2;
+
+ // For the ease of implementation get 1 frame every call to FillBuffer().
+ scoped_ptr<AudioBus> output = AudioBus::Create(channels_, 1);
+
+ // Input buffer to inject pulses.
+ scoped_refptr<AudioBuffer> input = AudioBuffer::CreateBuffer(
+ kSampleFormatPlanarF32, channels_, kPulseWidthSamples);
+
+ const std::vector<uint8*>& channel_data = input->channel_data();
+
+ // Fill |input| channels.
+ FillWithSquarePulseTrain(kHalfPulseWidthSamples, 0, kPulseWidthSamples,
+ reinterpret_cast<float*>(channel_data[0]));
+ FillWithSquarePulseTrain(kHalfPulseWidthSamples, kHalfPulseWidthSamples,
+ kPulseWidthSamples,
+ reinterpret_cast<float*>(channel_data[1]));
+
+ // A buffer for the output until a complete pulse is created. Then
+ // reference pulse is compared with this buffer.
+ scoped_ptr<AudioBus> pulse_buffer = AudioBus::Create(
+ channels_, kPulseWidthSamples);
+
+ const float kTolerance = 0.000001f;
+ // Equivalent of 4 seconds.
+ const int kNumRequestedPulses = kSampleRateHz * 4 / kPulseWidthSamples;
+ for (int n = 0; n < kNumRequestedPulses; ++n) {
+ int num_buffered_frames = 0;
+ while (num_buffered_frames < kPulseWidthSamples) {
+ int num_samples = algorithm_.FillBuffer(output.get(), 1);
+ ASSERT_LE(num_samples, 1);
+ if (num_samples > 0) {
+ output->CopyPartialFramesTo(0, num_samples, num_buffered_frames,
+ pulse_buffer.get());
+ num_buffered_frames++;
+ } else {
+ algorithm_.EnqueueBuffer(input);
+ }
+ }
+
+    // Pulses in the first half of WSOLA's OLA frame are not constructed
+    // perfectly. Do not check them.
+ if (n > 3) {
+ for (int m = 0; m < channels_; ++m) {
+ const float* pulse_ch = pulse_buffer->channel(m);
+
+ // Because of overlap-and-add we might have round off error.
+ for (int k = 0; k < kPulseWidthSamples; ++k) {
+ ASSERT_NEAR(reinterpret_cast<float*>(channel_data[m])[k],
+ pulse_ch[k], kTolerance) << " loop " << n
+ << " channel/sample " << m << "/" << k;
+ }
+ }
+ }
+
+ // Zero out the buffer to be sure the next comparison is relevant.
+ pulse_buffer->Zero();
+ }
+ }
+
protected:
AudioRendererAlgorithm algorithm_;
int frames_enqueued_;
@@ -270,7 +392,7 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_JumpAroundSpeeds) {
TEST_F(AudioRendererAlgorithmTest, FillBuffer_SmallBufferSize) {
Initialize();
static const int kBufferSizeInFrames = 1;
- static const int kFramesRequested = 2 * kSamplesPerSecond;
+ static const int kFramesRequested = kOutputDurationInSec * kSamplesPerSecond;
TestPlaybackRate(1.0, kBufferSizeInFrames, kFramesRequested);
TestPlaybackRate(0.5, kBufferSizeInFrames, kFramesRequested);
TestPlaybackRate(1.5, kBufferSizeInFrames, kFramesRequested);
@@ -297,4 +419,195 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_HigherQualityAudio) {
TestPlaybackRate(1.5);
}
+TEST_F(AudioRendererAlgorithmTest, DotProduct) {
+ const int kChannels = 3;
+ const int kFrames = 20;
+ const int kHalfPulseWidth = 2;
+
+ scoped_ptr<AudioBus> a = AudioBus::Create(kChannels, kFrames);
+ scoped_ptr<AudioBus> b = AudioBus::Create(kChannels, kFrames);
+
+ scoped_ptr<float[]> dot_prod(new float[kChannels]);
+
+ FillWithSquarePulseTrain(kHalfPulseWidth, 0, 0, a.get());
+ FillWithSquarePulseTrain(kHalfPulseWidth, 1, 1, a.get());
+ FillWithSquarePulseTrain(kHalfPulseWidth, 2, 2, a.get());
+
+ FillWithSquarePulseTrain(kHalfPulseWidth, 0, 0, b.get());
+ FillWithSquarePulseTrain(kHalfPulseWidth, 0, 1, b.get());
+ FillWithSquarePulseTrain(kHalfPulseWidth, 0, 2, b.get());
+
+ internal::MultiChannelDotProduct(a.get(), 0, b.get(), 0, kFrames,
+ dot_prod.get());
+
+ EXPECT_FLOAT_EQ(kFrames, dot_prod[0]);
+ EXPECT_FLOAT_EQ(0, dot_prod[1]);
+ EXPECT_FLOAT_EQ(-kFrames, dot_prod[2]);
+
+ internal::MultiChannelDotProduct(a.get(), 4, b.get(), 8, kFrames / 2,
+ dot_prod.get());
+
+ EXPECT_FLOAT_EQ(kFrames / 2, dot_prod[0]);
+ EXPECT_FLOAT_EQ(0, dot_prod[1]);
+ EXPECT_FLOAT_EQ(-kFrames / 2, dot_prod[2]);
+}
+
+TEST_F(AudioRendererAlgorithmTest, MovingBlockEnergy) {
+ const int kChannels = 2;
+ const int kFrames = 20;
+ const int kFramesPerBlock = 3;
+ const int kNumBlocks = kFrames - (kFramesPerBlock - 1);
+ scoped_ptr<AudioBus> a = AudioBus::Create(kChannels, kFrames);
+ scoped_ptr<float[]> energies(new float[kChannels * kNumBlocks]);
+ float* ch_left = a->channel(0);
+ float* ch_right = a->channel(1);
+
+ // Fill up both channels.
+ for (int n = 0; n < kFrames; ++n) {
+ ch_left[n] = n;
+ ch_right[n] = kFrames - 1 - n;
+ }
+
+ internal::MultiChannelMovingBlockEnergies(a.get(), kFramesPerBlock,
+ energies.get());
+
+ // Check if the energy of candidate blocks of each channel computed correctly.
+ for (int n = 0; n < kNumBlocks; ++n) {
+ float expected_energy = 0;
+ for (int k = 0; k < kFramesPerBlock; ++k)
+ expected_energy += ch_left[n + k] * ch_left[n + k];
+
+ // Left (first) channel.
+ EXPECT_FLOAT_EQ(expected_energy, energies[2 * n]);
+
+ expected_energy = 0;
+ for (int k = 0; k < kFramesPerBlock; ++k)
+ expected_energy += ch_right[n + k] * ch_right[n + k];
+
+ // Second (right) channel.
+ EXPECT_FLOAT_EQ(expected_energy, energies[2 * n + 1]);
+ }
+}
+
+TEST_F(AudioRendererAlgorithmTest, FullAndDecimatedSearch) {
+ const int kFramesInSearchRegion = 12;
+ const int kChannels = 2;
+ float ch_0[] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+ float ch_1[] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.1f, 1.0f, 0.1f, 0.0f, 0.0f };
+ ASSERT_EQ(sizeof(ch_0), sizeof(ch_1));
+ ASSERT_EQ(static_cast<size_t>(kFramesInSearchRegion),
+ sizeof(ch_0) / sizeof(*ch_0));
+ scoped_ptr<AudioBus> search_region = AudioBus::Create(kChannels,
+ kFramesInSearchRegion);
+ float* ch = search_region->channel(0);
+ memcpy(ch, ch_0, sizeof(float) * kFramesInSearchRegion);
+ ch = search_region->channel(1);
+ memcpy(ch, ch_1, sizeof(float) * kFramesInSearchRegion);
+
+ const int kFramePerBlock = 4;
+ float target_0[] = { 1.0f, 1.0f, 1.0f, 0.0f };
+ float target_1[] = { 0.0f, 1.0f, 0.1f, 1.0f };
+ ASSERT_EQ(sizeof(target_0), sizeof(target_1));
+ ASSERT_EQ(static_cast<size_t>(kFramePerBlock),
+ sizeof(target_0) / sizeof(*target_0));
+
+ scoped_ptr<AudioBus> target = AudioBus::Create(kChannels,
+ kFramePerBlock);
+ ch = target->channel(0);
+ memcpy(ch, target_0, sizeof(float) * kFramePerBlock);
+ ch = target->channel(1);
+ memcpy(ch, target_1, sizeof(float) * kFramePerBlock);
+
+ scoped_ptr<float[]> energy_target(new float[kChannels]);
+
+ internal::MultiChannelDotProduct(target.get(), 0, target.get(), 0,
+ kFramePerBlock, energy_target.get());
+
+ ASSERT_EQ(3.f, energy_target[0]);
+ ASSERT_EQ(2.01f, energy_target[1]);
+
+ const int kNumCandidBlocks = kFramesInSearchRegion - (kFramePerBlock - 1);
+ scoped_ptr<float[]> energy_candid_blocks(new float[kNumCandidBlocks *
+ kChannels]);
+
+ internal::MultiChannelMovingBlockEnergies(
+ search_region.get(), kFramePerBlock, energy_candid_blocks.get());
+
+ // Check the energy of the candidate blocks of the first channel.
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[0]);
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[2]);
+ ASSERT_FLOAT_EQ(1, energy_candid_blocks[4]);
+ ASSERT_FLOAT_EQ(2, energy_candid_blocks[6]);
+ ASSERT_FLOAT_EQ(3, energy_candid_blocks[8]);
+ ASSERT_FLOAT_EQ(3, energy_candid_blocks[10]);
+ ASSERT_FLOAT_EQ(2, energy_candid_blocks[12]);
+ ASSERT_FLOAT_EQ(1, energy_candid_blocks[14]);
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[16]);
+
+ // Check the energy of the candidate blocks of the second channel.
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[1]);
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[3]);
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[5]);
+ ASSERT_FLOAT_EQ(0, energy_candid_blocks[7]);
+ ASSERT_FLOAT_EQ(0.01f, energy_candid_blocks[9]);
+ ASSERT_FLOAT_EQ(1.01f, energy_candid_blocks[11]);
+ ASSERT_FLOAT_EQ(1.02f, energy_candid_blocks[13]);
+ ASSERT_FLOAT_EQ(1.02f, energy_candid_blocks[15]);
+ ASSERT_FLOAT_EQ(1.01f, energy_candid_blocks[17]);
+
+ // An interval which is of no effect.
+ internal::Interval exclude_interval = std::make_pair(-100, -10);
+ EXPECT_EQ(5, internal::FullSearch(
+ 0, kNumCandidBlocks - 1, exclude_interval, target.get(),
+ search_region.get(), energy_target.get(), energy_candid_blocks.get()));
+
+  // Exclude the best match.
+ exclude_interval = std::make_pair(2, 5);
+ EXPECT_EQ(7, internal::FullSearch(
+ 0, kNumCandidBlocks - 1, exclude_interval, target.get(),
+ search_region.get(), energy_target.get(), energy_candid_blocks.get()));
+
+ // An interval which is of no effect.
+ exclude_interval = std::make_pair(-100, -10);
+ EXPECT_EQ(4, internal::DecimatedSearch(
+ 4, exclude_interval, target.get(), search_region.get(),
+ energy_target.get(), energy_candid_blocks.get()));
+
+ EXPECT_EQ(5, internal::OptimalIndex(search_region.get(), target.get(),
+ exclude_interval));
+}
+
+TEST_F(AudioRendererAlgorithmTest, CubicInterpolation) {
+ // Arbitrary coefficients.
+ const float kA = 0.7f;
+ const float kB = 1.2f;
+ const float kC = 0.8f;
+
+ float y_values[3];
+ y_values[0] = kA - kB + kC;
+ y_values[1] = kC;
+ y_values[2] = kA + kB + kC;
+
+ float extremum;
+ float extremum_value;
+
+ internal::CubicInterpolation(y_values, &extremum, &extremum_value);
+
+ float x_star = -kB / (2.f * kA);
+ float y_star = kA * x_star * x_star + kB * x_star + kC;
+
+ EXPECT_FLOAT_EQ(x_star, extremum);
+ EXPECT_FLOAT_EQ(y_star, extremum_value);
+}
+
+TEST_F(AudioRendererAlgorithmTest, WsolaSlowdown) {
+ WsolaTest(0.6f);
+}
+
+TEST_F(AudioRendererAlgorithmTest, WsolaSpeedup) {
+ WsolaTest(1.6f);
+}
+
} // namespace media
diff --git a/chromium/media/filters/blocking_url_protocol.cc b/chromium/media/filters/blocking_url_protocol.cc
index 68e883c213d..e50b677d035 100644
--- a/chromium/media/filters/blocking_url_protocol.cc
+++ b/chromium/media/filters/blocking_url_protocol.cc
@@ -67,7 +67,7 @@ bool BlockingUrlProtocol::GetPosition(int64* position_out) {
bool BlockingUrlProtocol::SetPosition(int64 position) {
int64 file_size;
- if ((data_source_->GetSize(&file_size) && position >= file_size) ||
+ if ((data_source_->GetSize(&file_size) && position > file_size) ||
position < 0) {
return false;
}
diff --git a/chromium/media/filters/blocking_url_protocol_unittest.cc b/chromium/media/filters/blocking_url_protocol_unittest.cc
index 4886ba77301..d8d1dfc34e6 100644
--- a/chromium/media/filters/blocking_url_protocol_unittest.cc
+++ b/chromium/media/filters/blocking_url_protocol_unittest.cc
@@ -90,11 +90,14 @@ TEST_F(BlockingUrlProtocolTest, GetSetPosition) {
EXPECT_TRUE(url_protocol_.GetPosition(&position));
EXPECT_TRUE(url_protocol_.SetPosition(512));
- EXPECT_FALSE(url_protocol_.SetPosition(size));
EXPECT_FALSE(url_protocol_.SetPosition(size + 1));
EXPECT_FALSE(url_protocol_.SetPosition(-1));
EXPECT_TRUE(url_protocol_.GetPosition(&position));
EXPECT_EQ(512, position);
+
+ EXPECT_TRUE(url_protocol_.SetPosition(size));
+ EXPECT_TRUE(url_protocol_.GetPosition(&position));
+ EXPECT_EQ(size, position);
}
TEST_F(BlockingUrlProtocolTest, GetSize) {
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index 6f45a1f9dd8..a4a67f2d640 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -874,7 +874,6 @@ void ChunkDemuxer::CancelPendingSeek(TimeDelta seek_time) {
ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
const std::string& type,
std::vector<std::string>& codecs) {
- DCHECK_GT(codecs.size(), 0u);
base::AutoLock auto_lock(lock_);
if ((state_ != WAITING_FOR_INIT && state_ != INITIALIZING) || IsValidId(id))
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index 0a2f67bd319..e7f6caed37c 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -34,10 +34,6 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
kReachedIdLimit, // Reached ID limit. We can't handle any more IDs.
};
- typedef base::Callback<void(const std::string& type,
- scoped_ptr<uint8[]> init_data,
- int init_data_size)> NeedKeyCB;
-
// |open_cb| Run when Initialize() is called to signal that the demuxer
// is ready to receive media data via AppenData().
// |need_key_cb| Run when the demuxer determines that an encryption key is
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index e3a84e66d35..3d9b26f681c 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -157,7 +157,7 @@ class ChunkDemuxerTest : public testing::Test {
void CreateNewDemuxer() {
base::Closure open_cb =
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
- ChunkDemuxer::NeedKeyCB need_key_cb =
+ Demuxer::NeedKeyCB need_key_cb =
base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
AddTextTrackCB add_text_track_cb =
base::Bind(&ChunkDemuxerTest::OnTextTrack, base::Unretained(this));
@@ -856,8 +856,9 @@ class ChunkDemuxerTest : public testing::Test {
MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
const uint8* init_data, int init_data_size));
void DemuxerNeedKey(const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_size) {
- NeedKeyMock(type, init_data.get(), init_data_size);
+ const std::vector<uint8>& init_data) {
+ const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
+ NeedKeyMock(type, init_data_ptr, init_data.size());
}
scoped_ptr<TextTrack> OnTextTrack(TextKind kind,
@@ -2014,7 +2015,7 @@ TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
expected = ChunkDemuxer::kOk;
#endif
@@ -2029,7 +2030,7 @@ TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
expected = ChunkDemuxer::kOk;
#endif
const char* codec_ids[] = {
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index f516674a50e..2c144b4fc7e 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -191,7 +191,9 @@ void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
input_config.extra_data(),
input_config.extra_data_size(),
input_config.is_encrypted(),
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
state_ = kPendingDecoderInit;
decryptor_->InitializeAudioDecoder(
@@ -282,7 +284,9 @@ void DecryptingAudioDecoder::DecryptAndDecodeBuffer(
input_config.extra_data(),
input_config.extra_data_size(),
input_config.is_encrypted(),
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
state_ = kPendingConfigChange;
decryptor_->DeinitializeDecoder(Decryptor::kAudio);
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index fb97b915729..2f07e231c03 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -113,7 +113,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
.WillOnce(SaveArg<1>(&key_added_cb_));
config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true, true);
+ CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true, true,
+ base::TimeDelta(), base::TimeDelta());
InitializeAndExpectStatus(config_, PIPELINE_OK);
EXPECT_EQ(DecryptingAudioDecoder::kSupportedBitsPerChannel,
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index 1f183ceb288..55021489ba6 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -43,17 +43,16 @@ DecryptingDemuxerStream::DecryptingDemuxerStream(
key_added_while_decrypt_pending_(false) {
}
-void DecryptingDemuxerStream::Initialize(
- DemuxerStream* stream,
- const PipelineStatusCB& status_cb) {
- DVLOG(2) << "Initialize()";
+void DecryptingDemuxerStream::Initialize(DemuxerStream* stream,
+ const PipelineStatusCB& status_cb) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized) << state_;
DCHECK(!demuxer_stream_);
weak_this_ = weak_factory_.GetWeakPtr();
demuxer_stream_ = stream;
- init_cb_ = status_cb;
+ init_cb_ = BindToCurrentLoop(status_cb);
InitializeDecoderConfig();
@@ -63,27 +62,34 @@ void DecryptingDemuxerStream::Initialize(
}
void DecryptingDemuxerStream::Read(const ReadCB& read_cb) {
- DVLOG(3) << "Read()";
+ DVLOG(3) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kIdle) << state_;
DCHECK(!read_cb.is_null());
CHECK(read_cb_.is_null()) << "Overlapping reads are not supported.";
- read_cb_ = read_cb;
+ read_cb_ = BindToCurrentLoop(read_cb);
state_ = kPendingDemuxerRead;
demuxer_stream_->Read(
base::Bind(&DecryptingDemuxerStream::DecryptBuffer, weak_this_));
}
void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
- DVLOG(2) << "Reset() - state: " << state_;
+ DVLOG(2) << __FUNCTION__ << " - state: " << state_;
DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ != kUninitialized && state_ != kDecryptorRequested) << state_;
- DCHECK(init_cb_.is_null()); // No Reset() during pending initialization.
+ DCHECK(state_ != kUninitialized) << state_;
DCHECK(reset_cb_.is_null());
reset_cb_ = BindToCurrentLoop(closure);
+ if (state_ == kDecryptorRequested) {
+ DCHECK(!init_cb_.is_null());
+ set_decryptor_ready_cb_.Run(DecryptorReadyCB());
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
+ DoReset();
+ return;
+ }
+
decryptor_->CancelDecrypt(GetDecryptorStreamType());
// Reset() cannot complete if the read callback is still pending.
@@ -126,10 +132,12 @@ void DecryptingDemuxerStream::EnableBitstreamConverter() {
demuxer_stream_->EnableBitstreamConverter();
}
-DecryptingDemuxerStream::~DecryptingDemuxerStream() {}
+DecryptingDemuxerStream::~DecryptingDemuxerStream() {
+ DVLOG(2) << __FUNCTION__ << " : state_ = " << state_;
+}
void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
- DVLOG(2) << "SetDecryptor()";
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kDecryptorRequested) << state_;
DCHECK(!init_cb_.is_null());
@@ -138,8 +146,8 @@ void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
set_decryptor_ready_cb_.Reset();
if (!decryptor) {
- base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
state_ = kUninitialized;
+ base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
@@ -156,7 +164,7 @@ void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
void DecryptingDemuxerStream::DecryptBuffer(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
- DVLOG(3) << "DecryptBuffer()";
+ DVLOG(3) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDemuxerRead) << state_;
DCHECK(!read_cb_.is_null());
@@ -212,7 +220,7 @@ void DecryptingDemuxerStream::DecryptPendingBuffer() {
void DecryptingDemuxerStream::DeliverBuffer(
Decryptor::Status status,
const scoped_refptr<DecoderBuffer>& decrypted_buffer) {
- DVLOG(3) << "DeliverBuffer() - status: " << status;
+ DVLOG(3) << __FUNCTION__ << " - status: " << status;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecrypt) << state_;
DCHECK_NE(status, Decryptor::kNeedMoreData);
@@ -272,9 +280,15 @@ void DecryptingDemuxerStream::OnKeyAdded() {
}
void DecryptingDemuxerStream::DoReset() {
+ DCHECK(state_ != kUninitialized);
DCHECK(init_cb_.is_null());
DCHECK(read_cb_.is_null());
- state_ = kIdle;
+
+ if (state_ == kDecryptorRequested)
+ state_ = kUninitialized;
+ else
+ state_ = kIdle;
+
base::ResetAndReturn(&reset_cb_).Run();
}
@@ -302,7 +316,9 @@ void DecryptingDemuxerStream::InitializeDecoderConfig() {
input_audio_config.extra_data(),
input_audio_config.extra_data_size(),
false, // Output audio is not encrypted.
- false);
+ false,
+ base::TimeDelta(),
+ base::TimeDelta());
break;
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.h b/chromium/media/filters/decrypting_demuxer_stream.h
index bd75d665baf..cc34c04e27e 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.h
+++ b/chromium/media/filters/decrypting_demuxer_stream.h
@@ -35,6 +35,10 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
void Initialize(DemuxerStream* stream,
const PipelineStatusCB& status_cb);
+
+ // Cancels all pending operations and fires all pending callbacks. Sets
+ // |this| to kUninitialized state if |this| hasn't been initialized, or to
+ // kIdle state otherwise.
void Reset(const base::Closure& closure);
// DemuxerStream implementation.
diff --git a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
index e971aa64a25..585f3d0eb0a 100644
--- a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -326,6 +326,19 @@ TEST_F(DecryptingDemuxerStreamTest, KeyAdded_DruingPendingDecrypt) {
message_loop_.RunUntilIdle();
}
+// Test resetting when the DecryptingDemuxerStream is in kDecryptorRequested
+// state.
+TEST_F(DecryptingDemuxerStreamTest, Reset_DuringDecryptorRequested) {
+ // One for decryptor request, one for canceling request during Reset().
+ EXPECT_CALL(*this, RequestDecryptorNotification(_))
+ .Times(2);
+ AudioDecoderConfig input_config(
+ kCodecVorbis, kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 44100,
+ NULL, 0, true);
+ InitializeAudioAndExpectStatus(input_config, PIPELINE_ERROR_ABORT);
+ Reset();
+}
+
// Test resetting when the DecryptingDemuxerStream is in kIdle state but has
// not returned any buffer.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringIdleAfterInitialization) {
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index 1c0cc58ecd9..f41c89318cc 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -170,24 +170,38 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
return AVERROR(EINVAL);
// Determine how big the buffer should be and allocate it. FFmpeg may adjust
- // how big each channel data is in order to meet it's alignment policy, so
+ // how big each channel data is in order to meet the alignment policy, so
// we need to take this into consideration.
int buffer_size_in_bytes =
- av_samples_get_buffer_size(NULL, channels, frame->nb_samples, format, 1);
+ av_samples_get_buffer_size(&frame->linesize[0],
+ channels,
+ frame->nb_samples,
+ format,
+ AudioBuffer::kChannelAlignment);
int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
DCHECK_GE(frames_required, frame->nb_samples);
scoped_refptr<AudioBuffer> buffer =
AudioBuffer::CreateBuffer(sample_format, channels, frames_required);
- // Initialize the data[], linesize[], and extended_data[] fields.
- int ret = avcodec_fill_audio_frame(frame,
- channels,
- format,
- buffer->writable_data(),
- buffer_size_in_bytes,
- 1);
- if (ret < 0)
- return ret;
+ // Initialize the data[] and extended_data[] fields to point into the memory
+ // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved
+ // audio and equal to |channels| for planar audio.
+ int number_of_planes = buffer->channel_data().size();
+ if (number_of_planes <= AV_NUM_DATA_POINTERS) {
+ DCHECK_EQ(frame->extended_data, frame->data);
+ for (int i = 0; i < number_of_planes; ++i)
+ frame->data[i] = buffer->channel_data()[i];
+ } else {
+ // There are more channels than can fit into data[], so allocate
+ // extended_data[] and fill appropriately.
+ frame->extended_data = static_cast<uint8**>(
+ av_malloc(number_of_planes * sizeof(*frame->extended_data)));
+ int i = 0;
+ for (; i < AV_NUM_DATA_POINTERS; ++i)
+ frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i];
+ for (; i < number_of_planes; ++i)
+ frame->extended_data[i] = buffer->channel_data()[i];
+ }
// Now create an AVBufferRef for the data just allocated. It will own the
// reference to the AudioBuffer object.
@@ -337,6 +351,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
codec_context_->opaque = this;
codec_context_->get_buffer2 = GetAudioBufferImpl;
+ codec_context_->refcounted_frames = 1;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
@@ -376,10 +391,8 @@ void FFmpegAudioDecoder::ReleaseFFmpegResources() {
av_free(codec_context_);
}
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
+ if (av_frame_)
+ av_frame_free(&av_frame_);
}
void FFmpegAudioDecoder::ResetTimestampState() {
@@ -406,9 +419,6 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// want to hand it to the decoder at least once, otherwise we would end up
// skipping end of stream packets since they have a size of zero.
do {
- // Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
-
int frame_decoded = 0;
int result = avcodec_decode_audio4(
codec_context_, av_frame_, &frame_decoded, &packet);
@@ -467,6 +477,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// This is an unrecoverable error, so bail out.
QueuedAudioBuffer queue_entry = { kDecodeError, NULL };
queued_audio_.push_back(queue_entry);
+ av_frame_unref(av_frame_);
break;
}
@@ -489,8 +500,11 @@ void FFmpegAudioDecoder::RunDecodeLoop(
}
decoded_frames = output->frame_count();
+ av_frame_unref(av_frame_);
}
+ // WARNING: |av_frame_| no longer has valid data at this point.
+
if (decoded_frames > 0) {
// Set the timestamp/duration once all the extra frames have been
// discarded.
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index 14b1fff99cc..723eb5f28d9 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -287,7 +287,7 @@ base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
FFmpegDemuxer::FFmpegDemuxer(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
DataSource* data_source,
- const FFmpegNeedKeyCB& need_key_cb,
+ const NeedKeyCB& need_key_cb,
const scoped_refptr<MediaLog>& media_log)
: host_(NULL),
message_loop_(message_loop),
@@ -316,12 +316,7 @@ void FFmpegDemuxer::Stop(const base::Closure& callback) {
data_source_->Stop(BindToCurrentLoop(base::Bind(
&FFmpegDemuxer::OnDataSourceStopped, weak_this_,
BindToCurrentLoop(callback))));
-
- // TODO(scherkus): Reenable after figuring why Stop() gets called multiple
- // times, see http://crbug.com/235933
-#if 0
data_source_ = NULL;
-#endif
}
void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
@@ -476,7 +471,7 @@ void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
int result) {
DCHECK(message_loop_->BelongsToCurrentThread());
- if (!blocking_thread_.IsRunning()) {
+ if (!blocking_thread_.IsRunning() || !data_source_) {
status_cb.Run(PIPELINE_ERROR_ABORT);
return;
}
@@ -595,10 +590,11 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
media_log_->SetStringProperty("audio_sample_format", sample_name);
- media_log_->SetStringProperty("audio_codec_name",
- audio_codec->codec_name);
- media_log_->SetIntegerProperty("audio_sample_rate",
- audio_codec->sample_rate);
+ AVCodec* codec = avcodec_find_decoder(audio_codec->codec_id);
+ if (codec) {
+ media_log_->SetStringProperty("audio_codec_name", codec->name);
+ }
+
media_log_->SetIntegerProperty("audio_channels_count",
audio_codec->channels);
media_log_->SetIntegerProperty("audio_samples_per_second",
@@ -611,7 +607,12 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
if (video_stream) {
AVCodecContext* video_codec = video_stream->codec;
media_log_->SetBooleanProperty("found_video_stream", true);
- media_log_->SetStringProperty("video_codec_name", video_codec->codec_name);
+
+ AVCodec* codec = avcodec_find_decoder(video_codec->codec_id);
+ if (codec) {
+ media_log_->SetStringProperty("video_codec_name", codec->name);
+ }
+
media_log_->SetIntegerProperty("width", video_codec->width);
media_log_->SetIntegerProperty("height", video_codec->height);
media_log_->SetIntegerProperty("coded_width",
@@ -634,8 +635,6 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
media_log_->SetDoubleProperty("max_duration", max_duration.InSecondsF());
media_log_->SetDoubleProperty("start_time", start_time_.InSecondsF());
- media_log_->SetDoubleProperty("filesize_in_bytes",
- static_cast<double>(filesize_in_bytes));
media_log_->SetIntegerProperty("bitrate", bitrate_);
status_cb.Run(PIPELINE_OK);
@@ -809,10 +808,9 @@ void FFmpegDemuxer::StreamHasEnded() {
void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
const std::string& encryption_key_id) {
- int key_id_size = encryption_key_id.size();
- scoped_ptr<uint8[]> key_id_local(new uint8[key_id_size]);
- memcpy(key_id_local.get(), encryption_key_id.data(), key_id_size);
- need_key_cb_.Run(init_data_type, key_id_local.Pass(), key_id_size);
+ std::vector<uint8> key_id_local(encryption_key_id.begin(),
+ encryption_key_id.end());
+ need_key_cb_.Run(init_data_type, key_id_local);
}
void FFmpegDemuxer::NotifyCapacityAvailable() {
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index 92b3eab8316..7304beab244 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -44,14 +44,6 @@ struct AVStream;
namespace media {
-// A new potentially encrypted stream has been parsed.
-// First parameter - The type of initialization data.
-// Second parameter - The initialization data associated with the stream.
-// Third parameter - Number of bytes of the initialization data.
-typedef base::Callback<void(const std::string& type,
- scoped_ptr<uint8[]> init_data,
- int init_data_size)> FFmpegNeedKeyCB;
-
class MediaLog;
class FFmpegDemuxer;
class FFmpegGlue;
@@ -138,7 +130,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
public:
FFmpegDemuxer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
DataSource* data_source,
- const FFmpegNeedKeyCB& need_key_cb,
+ const NeedKeyCB& need_key_cb,
const scoped_refptr<MediaLog>& media_log);
virtual ~FFmpegDemuxer();
@@ -250,7 +242,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
BlockingUrlProtocol url_protocol_;
scoped_ptr<FFmpegGlue> glue_;
- const FFmpegNeedKeyCB need_key_cb_;
+ const NeedKeyCB need_key_cb_;
DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxer);
};
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index c1da0cc1b46..f5b0e978fe4 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -81,7 +81,7 @@ class FFmpegDemuxerTest : public testing::Test {
CreateDataSource(name);
- media::FFmpegNeedKeyCB need_key_cb =
+ Demuxer::NeedKeyCB need_key_cb =
base::Bind(&FFmpegDemuxerTest::NeedKeyCB, base::Unretained(this));
demuxer_.reset(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
data_source_.get(),
@@ -136,8 +136,9 @@ class FFmpegDemuxerTest : public testing::Test {
MOCK_METHOD3(NeedKeyCBMock, void(const std::string& type,
const uint8* init_data, int init_data_size));
void NeedKeyCB(const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_size) {
- NeedKeyCBMock(type, init_data.get(), init_data_size);
+ const std::vector<uint8>& init_data) {
+ const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
+ NeedKeyCBMock(type, init_data_ptr, init_data.size());
}
// Accessor to demuxer internals.
@@ -433,7 +434,9 @@ TEST_F(FFmpegDemuxerTest, Stop) {
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
ASSERT_TRUE(audio);
- demuxer_->Stop(NewExpectedClosure());
+ WaitableMessageLoopEvent event;
+ demuxer_->Stop(event.GetClosure());
+ event.RunAndWait();
// Reads after being stopped are all EOS buffers.
StrictMock<MockReadCB> callback;
@@ -442,6 +445,9 @@ TEST_F(FFmpegDemuxerTest, Stop) {
// Attempt the read...
audio->Read(base::Bind(&MockReadCB::Run, base::Unretained(&callback)));
message_loop_.RunUntilIdle();
+
+ // Don't let the test call Stop() again.
+ demuxer_.reset();
}
TEST_F(FFmpegDemuxerTest, DisableAudioStream) {
diff --git a/chromium/media/filters/mock_gpu_video_decoder_factories.cc b/chromium/media/filters/gpu_video_accelerator_factories.cc
index 9a16a802c9d..f9f56604d25 100644
--- a/chromium/media/filters/mock_gpu_video_decoder_factories.cc
+++ b/chromium/media/filters/gpu_video_accelerator_factories.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/mock_gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
namespace media {
-MockGpuVideoDecoderFactories::MockGpuVideoDecoderFactories() {}
-
-MockGpuVideoDecoderFactories::~MockGpuVideoDecoderFactories() {}
+GpuVideoAcceleratorFactories::~GpuVideoAcceleratorFactories() {}
} // namespace media
diff --git a/chromium/media/filters/gpu_video_decoder_factories.h b/chromium/media/filters/gpu_video_accelerator_factories.h
index 107e2de8c3b..3ee79ac6a5c 100644
--- a/chromium/media/filters/gpu_video_decoder_factories.h
+++ b/chromium/media/filters/gpu_video_accelerator_factories.h
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_GPU_VIDEO_DECODER_FACTORIES_H_
-#define MEDIA_FILTERS_GPU_VIDEO_DECODER_FACTORIES_H_
+#ifndef MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#define MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "media/video/video_decode_accelerator.h"
+#include "media/video/video_encode_accelerator.h"
namespace base {
class MessageLoopProxy;
@@ -18,15 +20,19 @@ class SkBitmap;
namespace media {
// Helper interface for specifying factories needed to instantiate a hardware
-// video decoder.
-class MEDIA_EXPORT GpuVideoDecoderFactories
- : public base::RefCountedThreadSafe<GpuVideoDecoderFactories> {
+// video accelerator.
+class MEDIA_EXPORT GpuVideoAcceleratorFactories
+ : public base::RefCountedThreadSafe<GpuVideoAcceleratorFactories> {
public:
// Caller owns returned pointer.
- virtual VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
+ virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator(
VideoCodecProfile profile,
VideoDecodeAccelerator::Client* client) = 0;
+ // Caller owns returned pointer.
+ virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator(
+ VideoEncodeAccelerator::Client* client) = 0;
+
// Allocate & delete native textures.
virtual uint32 CreateTextures(int32 count,
const gfx::Size& size,
@@ -47,7 +53,7 @@ class MEDIA_EXPORT GpuVideoDecoderFactories
// Close()ing the returned pointer.
virtual base::SharedMemory* CreateSharedMemory(size_t size) = 0;
- // Returns the message loop the VideoDecodeAccelerator runs on.
+ // Returns the message loop the video accelerator runs on.
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
// Abort any outstanding factory operations and error any future
@@ -58,10 +64,10 @@ class MEDIA_EXPORT GpuVideoDecoderFactories
virtual bool IsAborted() = 0;
protected:
- friend class base::RefCountedThreadSafe<GpuVideoDecoderFactories>;
- virtual ~GpuVideoDecoderFactories();
+ friend class base::RefCountedThreadSafe<GpuVideoAcceleratorFactories>;
+ virtual ~GpuVideoAcceleratorFactories();
};
} // namespace media
-#endif // MEDIA_FILTERS_GPU_VIDEO_DECODER_FACTORIES_H_
+#endif // MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 30c6dfa94f5..273542e85ee 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -14,10 +14,11 @@
#include "base/task_runner_util.h"
#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/media_log.h"
#include "media/base/pipeline.h"
#include "media/base/pipeline_status.h"
#include "media/base/video_decoder_config.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
namespace media {
@@ -52,12 +53,14 @@ GpuVideoDecoder::BufferData::BufferData(
GpuVideoDecoder::BufferData::~BufferData() {}
GpuVideoDecoder::GpuVideoDecoder(
- const scoped_refptr<GpuVideoDecoderFactories>& factories)
+ const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
+ const scoped_refptr<MediaLog>& media_log)
: needs_bitstream_conversion_(false),
gvd_loop_proxy_(factories->GetMessageLoop()),
weak_factory_(this),
factories_(factories),
state_(kNormal),
+ media_log_(media_log),
decoder_texture_target_(0),
next_picture_buffer_id_(0),
next_bitstream_buffer_id_(0),
@@ -137,7 +140,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
BindToCurrentLoop(orig_status_cb));
bool previously_initialized = config_.IsValidConfig();
-#if !defined(OS_CHROMEOS)
+#if !defined(OS_CHROMEOS) && !defined(OS_WIN)
if (previously_initialized) {
// TODO(xhwang): Make GpuVideoDecoder reinitializable.
// See http://crbug.com/233608
@@ -173,13 +176,15 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- vda_.reset(factories_->CreateVideoDecodeAccelerator(config.profile(), this));
+ vda_ =
+ factories_->CreateVideoDecodeAccelerator(config.profile(), this).Pass();
if (!vda_) {
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
DVLOG(3) << "GpuVideoDecoder::Initialize() succeeded.";
+ media_log_->SetStringProperty("video_decoder", "gpu");
status_cb.Run(PIPELINE_OK);
}
@@ -245,6 +250,11 @@ void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
if (state_ == kNormal) {
state_ = kDrainingDecoder;
vda_->Flush();
+ // If we have ready frames, go ahead and process them to ensure that the
+ // Flush operation does not block in the VDA due to lack of picture
+ // buffers.
+ if (!ready_video_frames_.empty())
+ EnqueueFrameAndTriggerFrameDelivery(NULL);
}
return;
}
@@ -435,7 +445,7 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
visible_rect,
natural_size,
timestamp,
- base::Bind(&GpuVideoDecoderFactories::ReadPixels,
+ base::Bind(&GpuVideoAcceleratorFactories::ReadPixels,
factories_,
pb.texture_id(),
decoder_texture_target_,
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index 29c330d2141..f7fff52e3fa 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -26,7 +26,8 @@ class SharedMemory;
namespace media {
class DecoderBuffer;
-class GpuVideoDecoderFactories;
+class GpuVideoAcceleratorFactories;
+class MediaLog;
// GPU-accelerated video decoder implementation. Relies on
// AcceleratedVideoDecoderMsg_Decode and friends.
@@ -36,7 +37,8 @@ class MEDIA_EXPORT GpuVideoDecoder
public:
// The message loop of |factories| will be saved to |gvd_loop_proxy_|.
explicit GpuVideoDecoder(
- const scoped_refptr<GpuVideoDecoderFactories>& factories);
+ const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
+ const scoped_refptr<MediaLog>& media_log);
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
@@ -117,7 +119,7 @@ class MEDIA_EXPORT GpuVideoDecoder
base::WeakPtrFactory<GpuVideoDecoder> weak_factory_;
base::WeakPtr<GpuVideoDecoder> weak_this_;
- scoped_refptr<GpuVideoDecoderFactories> factories_;
+ scoped_refptr<GpuVideoAcceleratorFactories> factories_;
// Populated during Initialize() (on success) and unchanged until an error
// occurs.
@@ -137,6 +139,8 @@ class MEDIA_EXPORT GpuVideoDecoder
// steady-state of the decoder.
std::vector<SHMBuffer*> available_shm_segments_;
+ scoped_refptr<MediaLog> media_log_;
+
// Book-keeping variables.
struct BufferPair {
BufferPair(SHMBuffer* s, const scoped_refptr<DecoderBuffer>& b);
diff --git a/chromium/media/filters/in_memory_url_protocol.cc b/chromium/media/filters/in_memory_url_protocol.cc
index c55438c7647..85fa290e501 100644
--- a/chromium/media/filters/in_memory_url_protocol.cc
+++ b/chromium/media/filters/in_memory_url_protocol.cc
@@ -35,7 +35,7 @@ bool InMemoryUrlProtocol::GetPosition(int64* position_out) {
}
bool InMemoryUrlProtocol::SetPosition(int64 position) {
- if (position < 0 || position >= size_)
+ if (position < 0 || position > size_)
return false;
position_ = position;
return true;
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.cc b/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
new file mode 100644
index 00000000000..f4f39973600
--- /dev/null
+++ b/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
@@ -0,0 +1,28 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/mock_gpu_video_accelerator_factories.h"
+
+namespace media {
+
+MockGpuVideoAcceleratorFactories::MockGpuVideoAcceleratorFactories() {}
+
+MockGpuVideoAcceleratorFactories::~MockGpuVideoAcceleratorFactories() {}
+
+scoped_ptr<VideoDecodeAccelerator>
+MockGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator(
+ VideoCodecProfile profile,
+ VideoDecodeAccelerator::Client* client) {
+ return scoped_ptr<VideoDecodeAccelerator>(
+ DoCreateVideoDecodeAccelerator(profile, client));
+}
+
+scoped_ptr<VideoEncodeAccelerator>
+MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator(
+ VideoEncodeAccelerator::Client* client) {
+ return scoped_ptr<VideoEncodeAccelerator>(
+ DoCreateVideoEncodeAccelerator(client));
+}
+
+} // namespace media
diff --git a/chromium/media/filters/mock_gpu_video_decoder_factories.h b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
index e0ad274b66f..8aa432d8cfc 100644
--- a/chromium/media/filters/mock_gpu_video_decoder_factories.h
+++ b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_MOCK_GPU_VIDEO_DECODER_FACTORIES_H_
-#define MEDIA_FILTERS_MOCK_GPU_VIDEO_DECODER_FACTORIES_H_
+#ifndef MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#define MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "media/video/video_decode_accelerator.h"
+#include "media/video/video_encode_accelerator.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/skia/include/core/SkBitmap.h"
@@ -20,12 +22,18 @@ class SharedMemory;
namespace media {
-class MockGpuVideoDecoderFactories : public GpuVideoDecoderFactories {
+class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
public:
- MockGpuVideoDecoderFactories();
- MOCK_METHOD2(CreateVideoDecodeAccelerator,
+ MockGpuVideoAcceleratorFactories();
+
+ // CreateVideo{Decode,Encode}Accelerator returns scoped_ptr, which the mocking
+ // framework does not want. Trampoline them.
+ MOCK_METHOD2(DoCreateVideoDecodeAccelerator,
VideoDecodeAccelerator*(VideoCodecProfile,
VideoDecodeAccelerator::Client*));
+ MOCK_METHOD1(DoCreateVideoEncodeAccelerator,
+ VideoEncodeAccelerator*(VideoEncodeAccelerator::Client*));
+
MOCK_METHOD5(CreateTextures,
uint32(int32 count,
const gfx::Size& size,
@@ -44,12 +52,19 @@ class MockGpuVideoDecoderFactories : public GpuVideoDecoderFactories {
MOCK_METHOD0(Abort, void());
MOCK_METHOD0(IsAborted, bool());
+ virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator(
+ VideoCodecProfile profile,
+ VideoDecodeAccelerator::Client* client) OVERRIDE;
+
+ virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator(
+ VideoEncodeAccelerator::Client* client) OVERRIDE;
+
private:
- virtual ~MockGpuVideoDecoderFactories();
+ virtual ~MockGpuVideoAcceleratorFactories();
- DISALLOW_COPY_AND_ASSIGN(MockGpuVideoDecoderFactories);
+ DISALLOW_COPY_AND_ASSIGN(MockGpuVideoAcceleratorFactories);
};
} // namespace media
-#endif // MEDIA_FILTERS_MOCK_GPU_VIDEO_DECODER_FACTORIES_H_
+#endif // MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
diff --git a/chromium/media/filters/opus_audio_decoder.cc b/chromium/media/filters/opus_audio_decoder.cc
index 115799ab711..b3e903b2313 100644
--- a/chromium/media/filters/opus_audio_decoder.cc
+++ b/chromium/media/filters/opus_audio_decoder.cc
@@ -4,6 +4,8 @@
#include "media/filters/opus_audio_decoder.h"
+#include <cmath>
+
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
@@ -250,7 +252,6 @@ OpusAudioDecoder::OpusAudioDecoder(
channel_layout_(CHANNEL_LAYOUT_NONE),
samples_per_second_(0),
last_input_timestamp_(kNoTimestamp()),
- output_bytes_to_drop_(0),
skip_samples_(0) {
}
@@ -457,10 +458,24 @@ bool OpusAudioDecoder::ConfigureDecoder() {
config,
&opus_header);
- skip_samples_ = opus_header.skip_samples;
-
- if (skip_samples_ > 0)
- output_bytes_to_drop_ = skip_samples_ * config.bytes_per_frame();
+ if (!config.codec_delay().InMicroseconds()) {
+ // TODO(vigneshv): Replace this with return false once ffmpeg demuxer code
+ // starts populating the config correctly.
+ skip_samples_ = opus_header.skip_samples;
+ } else {
+ // Convert from seconds to samples.
+ skip_samples_ = std::ceil(config.codec_delay().InMicroseconds() *
+ config.samples_per_second() / 1000000.0);
+ if (skip_samples_ < 0) {
+ DVLOG(1) << "Invalid file. Incorrect value for codec delay.";
+ return false;
+ }
+ if (skip_samples_ != opus_header.skip_samples) {
+ DVLOG(1) << "Invalid file. Codec Delay in container does not match the "
+ << "value in Opus header.";
+ return false;
+ }
+ }
uint8 channel_mapping[kMaxVorbisChannels];
memcpy(&channel_mapping,
@@ -487,9 +502,6 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- // TODO(tomfinegan): Handle audio delay once the matroska spec is updated
- // to represent the value.
-
bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
@@ -508,7 +520,7 @@ void OpusAudioDecoder::CloseDecoder() {
void OpusAudioDecoder::ResetTimestampState() {
output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
last_input_timestamp_ = kNoTimestamp();
- output_bytes_to_drop_ = 0;
+ skip_samples_ = 0;
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
@@ -539,16 +551,6 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
output_timestamp_helper_->SetBaseTimestamp(input->timestamp());
}
- if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) {
- int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_);
- DCHECK_EQ(dropped_size % kBytesPerChannel, 0);
- decoded_audio_data += dropped_size;
- decoded_audio_size -= dropped_size;
- output_bytes_to_drop_ -= dropped_size;
- samples_decoded = decoded_audio_size /
- demuxer_stream_->audio_decoder_config().bytes_per_frame();
- }
-
if (decoded_audio_size > 0) {
// Copy the audio samples into an output buffer.
uint8* data[] = { decoded_audio_data };
@@ -560,8 +562,28 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
output_timestamp_helper_->GetTimestamp(),
output_timestamp_helper_->GetFrameDuration(samples_decoded));
output_timestamp_helper_->AddFrames(samples_decoded);
+ if (skip_samples_ > 0) {
+ int dropped_size = std::min(samples_decoded, skip_samples_);
+ output_buffer->get()->TrimStart(dropped_size);
+ skip_samples_ -= dropped_size;
+ samples_decoded -= dropped_size;
+ }
+ if (input->discard_padding().InMicroseconds() > 0) {
+ int discard_padding = std::ceil(
+ input->discard_padding().InMicroseconds() *
+ samples_per_second_ / 1000000.0);
+ if (discard_padding < 0 || discard_padding > samples_decoded) {
+ DVLOG(1) << "Invalid file. Incorrect discard padding value.";
+ return false;
+ }
+ output_buffer->get()->TrimEnd(std::min(samples_decoded, discard_padding));
+ samples_decoded -= discard_padding;
+ }
}
+ decoded_audio_size =
+ samples_decoded *
+ demuxer_stream_->audio_decoder_config().bytes_per_frame();
// Decoding finished successfully, update statistics.
PipelineStatistics statistics;
statistics.audio_bytes_decoded = decoded_audio_size;
diff --git a/chromium/media/filters/opus_audio_decoder.h b/chromium/media/filters/opus_audio_decoder.h
index a808ff34820..77e84344f0c 100644
--- a/chromium/media/filters/opus_audio_decoder.h
+++ b/chromium/media/filters/opus_audio_decoder.h
@@ -70,10 +70,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
base::TimeDelta last_input_timestamp_;
- // Number of output sample bytes to drop before generating
- // output buffers.
- int output_bytes_to_drop_;
-
ReadCB read_cb_;
int skip_samples_;
diff --git a/chromium/media/filters/pipeline_integration_test.cc b/chromium/media/filters/pipeline_integration_test.cc
index 26f65b96024..0ce2fd12446 100644
--- a/chromium/media/filters/pipeline_integration_test.cc
+++ b/chromium/media/filters/pipeline_integration_test.cc
@@ -5,11 +5,13 @@
#include "media/filters/pipeline_integration_test_base.h"
#include "base/bind.h"
+#include "base/command_line.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_keys.h"
+#include "media/base/media_switches.h"
#include "media/base/test_data_util.h"
#include "media/cdm/aes_decryptor.h"
#include "media/filters/chunk_demuxer.h"
@@ -26,12 +28,14 @@ static const uint8 kInitData[] = { 0x69, 0x6e, 0x69, 0x74 };
static const char kWebM[] = "video/webm; codecs=\"vp8,vorbis\"";
static const char kWebMVP9[] = "video/webm; codecs=\"vp9\"";
static const char kAudioOnlyWebM[] = "video/webm; codecs=\"vorbis\"";
+static const char kOpusAudioOnlyWebM[] = "video/webm; codecs=\"opus\"";
static const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
static const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
static const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
static const char kMP4Audio[] = "audio/mp4; codecs=\"mp4a.40.2\"";
static const char kMP4AudioType[] = "audio/mp4";
static const char kMP4VideoType[] = "video/mp4";
+static const char kMP3[] = "audio/mpeg";
// Key used to encrypt test files.
static const uint8 kSecretKey[] = {
@@ -55,9 +59,14 @@ static const int k640WebMFileDurationMs = 2763;
static const int k640IsoFileDurationMs = 2737;
static const int k640IsoCencFileDurationMs = 2736;
static const int k1280IsoFileDurationMs = 2736;
+static const int kOpusEndTrimmingWebMFileDurationMs = 2771;
+static const uint32 kOpusEndTrimmingWebMFileAudioBytes = 528676;
static const int kVP9WebMFileDurationMs = 2735;
static const int kVP8AWebMFileDurationMs = 2700;
+// Command line switch for runtime adjustment of audio file to be benchmarked.
+static const char kBenchmarkAudioFile[] = "benchmark-audio-file";
+
// Note: Tests using this class only exercise the DecryptingDemuxerStream path.
// They do not exercise the Decrypting{Audio|Video}Decoder path.
class FakeEncryptedMedia {
@@ -82,7 +91,7 @@ class FakeEncryptedMedia {
virtual void NeedKey(const std::string& session_id,
const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_length,
+ const std::vector<uint8>& init_data,
AesDecryptor* decryptor) = 0;
};
@@ -119,9 +128,8 @@ class FakeEncryptedMedia {
void NeedKey(const std::string& session_id,
const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_length) {
- app_->NeedKey(session_id, type, init_data.Pass(), init_data_length,
- &decryptor_);
+ const std::vector<uint8>& init_data) {
+ app_->NeedKey(session_id, type, init_data, &decryptor_);
}
private:
@@ -147,7 +155,7 @@ class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
virtual void NeedKey(const std::string& session_id,
const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_length,
+ const std::vector<uint8>& init_data,
AesDecryptor* decryptor) OVERRIDE {
current_session_id_ = session_id;
@@ -161,8 +169,8 @@ class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
// Clear Key really needs the key ID in |init_data|. For WebM, they are the
// same, but this is not the case for ISO CENC. Therefore, provide the
// correct key ID.
- const uint8* key_id = init_data.get();
- int key_id_length = init_data_length;
+ const uint8* key_id = init_data.empty() ? NULL : &init_data[0];
+ size_t key_id_length = init_data.size();
if (type == kMP4AudioType || type == kMP4VideoType) {
key_id = kKeyId;
key_id_length = arraysize(kKeyId);
@@ -193,7 +201,7 @@ class NoResponseApp : public FakeEncryptedMedia::AppBase {
virtual void NeedKey(const std::string& session_id,
const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_length,
+ const std::vector<uint8>& init_data,
AesDecryptor* decryptor) OVERRIDE {
}
};
@@ -281,24 +289,39 @@ class MockMediaSource {
}
void DemuxerOpenedTask() {
+ // This code assumes that |mimetype_| is one of the following forms.
+ // 1. audio/mpeg
+ // 2. video/webm;codec="vorbis,vp8".
size_t semicolon = mimetype_.find(";");
- std::string type = mimetype_.substr(0, semicolon);
- size_t quote1 = mimetype_.find("\"");
- size_t quote2 = mimetype_.find("\"", quote1 + 1);
- std::string codecStr = mimetype_.substr(quote1 + 1, quote2 - quote1 - 1);
+ std::string type = mimetype_;
std::vector<std::string> codecs;
- Tokenize(codecStr, ",", &codecs);
+ if (semicolon != std::string::npos) {
+ type = mimetype_.substr(0, semicolon);
+ size_t codecs_param_start = mimetype_.find("codecs=\"", semicolon);
+
+ CHECK_NE(codecs_param_start, std::string::npos);
+
+ codecs_param_start += 8; // Skip over the codecs=".
+
+ size_t codecs_param_end = mimetype_.find("\"", codecs_param_start);
+
+ CHECK_NE(codecs_param_end, std::string::npos);
+
+ std::string codecs_param =
+ mimetype_.substr(codecs_param_start,
+ codecs_param_end - codecs_param_start);
+ Tokenize(codecs_param, ",", &codecs);
+ }
CHECK_EQ(chunk_demuxer_->AddId(kSourceId, type, codecs), ChunkDemuxer::kOk);
AppendData(initial_append_size_);
}
void DemuxerNeedKey(const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_size) {
- DCHECK(init_data.get());
- DCHECK_GT(init_data_size, 0);
+ const std::vector<uint8>& init_data) {
+ DCHECK(!init_data.empty());
CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(std::string(), type, init_data.Pass(), init_data_size);
+ need_key_cb_.Run(std::string(), type, init_data);
}
scoped_ptr<TextTrack> OnTextTrack(TextKind kind,
@@ -403,8 +426,8 @@ TEST_F(PipelineIntegrationTest, BasicPlayback) {
}
TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240.webm"),
- PIPELINE_OK, true));
+ ASSERT_TRUE(Start(
+ GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK, kHashed));
Play();
@@ -414,8 +437,31 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
}
+TEST_F(PipelineIntegrationTest, AudioPlaybackBenchmark) {
+ // Audio-only files are all that is allowed for clockless playback.
+ // Audio file can be specified on the command line
+ // (--benchmark-audio-file=id3_png_test.mp3), so check for it.
+ std::string filename(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ kBenchmarkAudioFile));
+ if (filename.empty())
+ filename = "sfx_f32le.wav";
+
+ ASSERT_TRUE(Start(GetTestDataFilePath(filename), PIPELINE_OK, kClockless));
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+
+ // Call Stop() to ensure that the rendering is complete.
+ Stop();
+ printf("Clockless playback of %s took %.2f ms.\n",
+ filename.c_str(),
+ GetAudioTime().InMillisecondsF());
+}
+
TEST_F(PipelineIntegrationTest, F32PlaybackHashed) {
- ASSERT_TRUE(Start(GetTestDataFilePath("sfx_f32le.wav"), PIPELINE_OK, true));
+ ASSERT_TRUE(
+ Start(GetTestDataFilePath("sfx_f32le.wav"), PIPELINE_OK, kHashed));
Play();
ASSERT_TRUE(WaitUntilOnEnded());
EXPECT_EQ(std::string(kNullVideoHash), GetVideoHash());
@@ -491,6 +537,26 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
Stop();
}
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
+ EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
+ MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
+ kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileAudioBytes,
+ pipeline_->GetStatistics().audio_bytes_decoded);
+ source.Abort();
+ Stop();
+}
+
TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_WebM) {
MockMediaSource source("bear-320x240-16x9-aspect.webm", kWebM,
kAppendWholeFile);
@@ -601,7 +667,28 @@ TEST_F(PipelineIntegrationTest,
source.Abort();
}
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
+TEST_F(PipelineIntegrationTest, MediaSource_MP3) {
+ MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
+
+
+TEST_F(PipelineIntegrationTest, MediaSource_MP3_Icecast) {
+ MockMediaSource source("icy_sfx.mp3", kMP3, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
+
TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_MP4) {
MockMediaSource source("bear-640x360-av_frag.mp4", kMP4, kAppendWholeFile);
StartPipelineWithMediaSource(&source);
@@ -777,7 +864,7 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_NoEncryptedFrames_WebM) {
Stop();
}
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_VideoOnly) {
MockMediaSource source("bear-1280x720-v_frag-cenc.mp4",
kMP4Video, kAppendWholeFile);
diff --git a/chromium/media/filters/pipeline_integration_test_base.cc b/chromium/media/filters/pipeline_integration_test_base.cc
index e2567adfbfe..3f0910a2b8b 100644
--- a/chromium/media/filters/pipeline_integration_test_base.cc
+++ b/chromium/media/filters/pipeline_integration_test_base.cc
@@ -26,6 +26,7 @@ const char kNullAudioHash[] = "0.00,0.00,0.00,0.00,0.00,0.00,";
PipelineIntegrationTestBase::PipelineIntegrationTestBase()
: hashing_enabled_(false),
+ clockless_playback_(false),
pipeline_(new Pipeline(message_loop_.message_loop_proxy(),
new MediaLog())),
ended_(false),
@@ -64,12 +65,10 @@ PipelineStatusCB PipelineIntegrationTestBase::QuitOnStatusCB(
void PipelineIntegrationTestBase::DemuxerNeedKeyCB(
const std::string& type,
- scoped_ptr<uint8[]> init_data,
- int init_data_size) {
- DCHECK(init_data.get());
- DCHECK_GT(init_data_size, 0);
+ const std::vector<uint8>& init_data) {
+ DCHECK(!init_data.empty());
CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(std::string(), type, init_data.Pass(), init_data_size);
+ need_key_cb_.Run(std::string(), type, init_data);
}
void PipelineIntegrationTestBase::OnEnded() {
@@ -120,8 +119,9 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
PipelineStatus expected_status,
- bool hashing_enabled) {
- hashing_enabled_ = hashing_enabled;
+ kTestType test_type) {
+ hashing_enabled_ = test_type == kHashed;
+ clockless_playback_ = test_type == kClockless;
return Start(file_path, expected_status);
}
@@ -210,7 +210,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
CHECK(file_data_source->Initialize(file_path));
data_source_.reset(file_data_source);
- media::FFmpegNeedKeyCB need_key_cb = base::Bind(
+ Demuxer::NeedKeyCB need_key_cb = base::Bind(
&PipelineIntegrationTestBase::DemuxerNeedKeyCB, base::Unretained(this));
scoped_ptr<Demuxer> demuxer(
new FFmpegDemuxer(message_loop_.message_loop_proxy(),
@@ -229,26 +229,32 @@ PipelineIntegrationTestBase::CreateFilterCollection(
scoped_ptr<FilterCollection> collection(new FilterCollection());
collection->SetDemuxer(demuxer_.get());
- ScopedVector<VideoDecoder> video_decoders;
- video_decoders.push_back(
- new VpxVideoDecoder(message_loop_.message_loop_proxy()));
- video_decoders.push_back(
- new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
-
- // Disable frame dropping if hashing is enabled.
- scoped_ptr<VideoRenderer> renderer(new VideoRendererBase(
- message_loop_.message_loop_proxy(),
- video_decoders.Pass(),
- base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
- base::Unretained(this), decryptor),
- base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
- base::Unretained(this)),
- !hashing_enabled_));
- collection->SetVideoRenderer(renderer.Pass());
-
- audio_sink_ = new NullAudioSink(message_loop_.message_loop_proxy());
+ if (!clockless_playback_) {
+ ScopedVector<VideoDecoder> video_decoders;
+ video_decoders.push_back(
+ new VpxVideoDecoder(message_loop_.message_loop_proxy()));
+ video_decoders.push_back(
+ new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
+
+ // Disable frame dropping if hashing is enabled.
+ scoped_ptr<VideoRenderer> renderer(new VideoRendererBase(
+ message_loop_.message_loop_proxy(),
+ video_decoders.Pass(),
+ base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
+ base::Unretained(this),
+ decryptor),
+ base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
+ base::Unretained(this)),
+ !hashing_enabled_));
+ collection->SetVideoRenderer(renderer.Pass());
+
+ audio_sink_ = new NullAudioSink(message_loop_.message_loop_proxy());
+ } else {
+ // audio only for clockless_playback_
+ clockless_audio_sink_ = new ClocklessAudioSink();
+ }
ScopedVector<AudioDecoder> audio_decoders;
audio_decoders.push_back(
@@ -258,7 +264,9 @@ PipelineIntegrationTestBase::CreateFilterCollection(
AudioRendererImpl* audio_renderer_impl = new AudioRendererImpl(
message_loop_.message_loop_proxy(),
- audio_sink_.get(),
+ (clockless_playback_)
+ ? static_cast<AudioRendererSink*>(clockless_audio_sink_.get())
+ : audio_sink_.get(),
audio_decoders.Pass(),
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
base::Unretained(this),
@@ -301,4 +309,9 @@ std::string PipelineIntegrationTestBase::GetAudioHash() {
return audio_sink_->GetAudioHashForTesting();
}
+base::TimeDelta PipelineIntegrationTestBase::GetAudioTime() {
+ DCHECK(clockless_playback_);
+ return clockless_audio_sink_->render_time();
+}
+
} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test_base.h b/chromium/media/filters/pipeline_integration_test_base.h
index e9dc0aa1bd1..ade9ad6d974 100644
--- a/chromium/media/filters/pipeline_integration_test_base.h
+++ b/chromium/media/filters/pipeline_integration_test_base.h
@@ -7,6 +7,7 @@
#include "base/md5.h"
#include "base/message_loop/message_loop.h"
+#include "media/audio/clockless_audio_sink.h"
#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/media_keys.h"
@@ -47,10 +48,13 @@ class PipelineIntegrationTestBase {
bool WaitUntilOnEnded();
PipelineStatus WaitUntilEndedOrError();
bool Start(const base::FilePath& file_path, PipelineStatus expected_status);
- // Enable playback with audio and video hashing enabled. Frame dropping and
- // audio underflow will be disabled to ensure consistent hashes.
- bool Start(const base::FilePath& file_path, PipelineStatus expected_status,
- bool hashing_enabled);
+ // Enable playback with audio and video hashing enabled, or clockless
+ // playback (audio only). Frame dropping and audio underflow will be disabled
+ // if hashing enabled to ensure consistent hashes.
+ enum kTestType { kHashed, kClockless };
+ bool Start(const base::FilePath& file_path,
+ PipelineStatus expected_status,
+ kTestType test_type);
// Initialize the pipeline and ignore any status updates. Useful for testing
// invalid audio/video clips which don't have deterministic results.
bool Start(const base::FilePath& file_path);
@@ -75,14 +79,20 @@ class PipelineIntegrationTestBase {
// enabled.
std::string GetAudioHash();
+ // Returns the time taken to render the complete audio file.
+ // Pipeline must have been started with clockless playback enabled.
+ base::TimeDelta GetAudioTime();
+
protected:
base::MessageLoop message_loop_;
base::MD5Context md5_context_;
bool hashing_enabled_;
+ bool clockless_playback_;
scoped_ptr<Demuxer> demuxer_;
scoped_ptr<DataSource> data_source_;
scoped_ptr<Pipeline> pipeline_;
scoped_refptr<NullAudioSink> audio_sink_;
+ scoped_refptr<ClocklessAudioSink> clockless_audio_sink_;
bool ended_;
PipelineStatus pipeline_status_;
NeedKeyCB need_key_cb_;
@@ -93,8 +103,8 @@ class PipelineIntegrationTestBase {
void OnStatusCallback(PipelineStatus status);
PipelineStatusCB QuitOnStatusCB(PipelineStatus expected_status);
void DemuxerNeedKeyCB(const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_size);
- void set_need_key_cb(const NeedKeyCB& need_key_cb) {
+ const std::vector<uint8>& init_data);
+ void set_need_key_cb(const NeedKeyCB& need_key_cb) {
need_key_cb_ = need_key_cb;
}
@@ -103,6 +113,7 @@ class PipelineIntegrationTestBase {
void QuitAfterCurrentTimeTask(const base::TimeDelta& quit_time);
scoped_ptr<FilterCollection> CreateFilterCollection(
scoped_ptr<Demuxer> demuxer, Decryptor* decryptor);
+
void SetDecryptor(Decryptor* decryptor,
const DecryptorReadyCB& decryptor_ready_cb);
void OnVideoRendererPaint(const scoped_refptr<VideoFrame>& frame);
diff --git a/chromium/media/filters/skcanvas_video_renderer.cc b/chromium/media/filters/skcanvas_video_renderer.cc
index 5a889e333a9..f0bf13d4bb8 100644
--- a/chromium/media/filters/skcanvas_video_renderer.cc
+++ b/chromium/media/filters/skcanvas_video_renderer.cc
@@ -56,7 +56,7 @@ static bool CanFastPaint(SkCanvas* canvas, uint8 alpha,
SkScalarNearlyZero(total_matrix.getSkewY()) &&
total_matrix.getScaleX() > 0 &&
total_matrix.getScaleY() > 0) {
- SkDevice* device = canvas->getDevice();
+ SkBaseDevice* device = canvas->getDevice();
const SkBitmap::Config config = device->config();
if (config == SkBitmap::kARGB_8888_Config && device->isOpaque()) {
diff --git a/chromium/media/filters/skcanvas_video_renderer_unittest.cc b/chromium/media/filters/skcanvas_video_renderer_unittest.cc
index e5eff5bd23b..1550dacc63c 100644
--- a/chromium/media/filters/skcanvas_video_renderer_unittest.cc
+++ b/chromium/media/filters/skcanvas_video_renderer_unittest.cc
@@ -5,8 +5,8 @@
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkBitmapDevice.h"
#include "third_party/skia/include/core/SkCanvas.h"
-#include "third_party/skia/include/core/SkDevice.h"
#include "media/filters/skcanvas_video_renderer.h"
using media::VideoFrame;
@@ -75,9 +75,9 @@ class SkCanvasVideoRendererTest : public testing::Test {
scoped_refptr<VideoFrame> smaller_frame_;
scoped_refptr<VideoFrame> cropped_frame_;
- SkDevice fast_path_device_;
+ SkBitmapDevice fast_path_device_;
SkCanvas fast_path_canvas_;
- SkDevice slow_path_device_;
+ SkBitmapDevice slow_path_device_;
SkCanvas slow_path_canvas_;
DISALLOW_COPY_AND_ASSIGN(SkCanvasVideoRendererTest);
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index b0038dd7bbb..7c76c84a049 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -471,6 +471,9 @@ bool SourceBufferStream::Append(
void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration) {
+ DVLOG(1) << __FUNCTION__ << "(" << start.InSecondsF()
+ << ", " << end.InSecondsF()
+ << ", " << duration.InSecondsF() << ")";
DCHECK(start >= base::TimeDelta()) << start.InSecondsF();
DCHECK(start < end) << "start " << start.InSecondsF()
<< " end " << end.InSecondsF();
@@ -497,17 +500,22 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
if (new_range) {
itr = ranges_.insert(++itr, new_range);
--itr;
+
+ // Update the selected range if the next buffer position was transferred
+ // to |new_range|.
+ if (new_range->HasNextBufferPosition())
+ SetSelectedRange(new_range);
}
// If the current range now is completely covered by the removal
// range then delete it and move on.
if (start <= range->GetStartTimestamp()) {
if (selected_range_ == range)
- SetSelectedRange(NULL);
+ SetSelectedRange(NULL);
- delete range;
- itr = ranges_.erase(itr);
- continue;
+ delete range;
+ itr = ranges_.erase(itr);
+ continue;
}
// Truncate the current range so that it only contains data before
@@ -518,6 +526,7 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
// Check to see if the current playback position was removed and
// update the selected range appropriately.
if (!saved_buffers.empty()) {
+ DCHECK(!range->HasNextBufferPosition());
SetSelectedRange(NULL);
SetSelectedRangeIfNeeded(saved_buffers.front()->GetDecodeTimestamp());
}
@@ -525,6 +534,9 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
// Move on to the next range.
++itr;
}
+
+ DCHECK(IsRangeListSorted(ranges_));
+ DCHECK(OnlySelectedRangeIsSeeked());
}
void SourceBufferStream::ResetSeekState() {
@@ -992,6 +1004,8 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
// Need to partially truncate this range.
if ((*itr)->GetStartTimestamp() < duration) {
(*itr)->TruncateAt(duration, NULL, false);
+ if ((*itr == selected_range_) && !selected_range_->HasNextBufferPosition())
+ SetSelectedRange(NULL);
++itr;
}
@@ -1253,6 +1267,8 @@ void SourceBufferStream::CompleteConfigChange() {
void SourceBufferStream::SetSelectedRangeIfNeeded(
const base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
+
if (selected_range_) {
DCHECK(track_buffer_.empty());
return;
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 3c120745b4e..8b648861e86 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -2683,6 +2683,31 @@ TEST_F(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
CheckExpectedRanges("{ [0,4) [10,10) }");
}
+// Test the case where duration is set while the stream parser buffers
+// already start passing the data to decoding pipeline. Selected range,
+// when invalidated by getting truncated, should be updated to NULL
+// accordingly so that successive append operations keep working.
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
+ // Seek to start of stream.
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(0));
+
+ NewSegmentAppend("0K 30 60 90");
+
+ // Read out the first few buffers.
+ CheckExpectedBuffers("0K 30");
+
+ // Set duration to be right before buffer 1.
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(60));
+
+ // Verify that there is no next buffer.
+ CheckNoNextBuffer();
+
+ // We should be able to append new buffers at this point.
+ NewSegmentAppend("120K 150");
+
+ CheckExpectedRangesByTimestamp("{ [0,60) [120,180) }");
+}
+
// Test the case were the current playback position is at the end of the
// buffered data and several overlaps occur that causes the selected
// range to get split and then merged back into a single range.
@@ -2940,7 +2965,7 @@ TEST_F(SourceBufferStreamTest, Remove_Partial4) {
CheckExpectedRangesByTimestamp("{ [10,40) [2060,2150) }");
}
-// Test behavior when the current positing is removed and new buffers
+// Test behavior when the current position is removed and new buffers
// are appended over the removal range.
TEST_F(SourceBufferStreamTest, Remove_CurrentPosition) {
Seek(0);
@@ -2964,6 +2989,21 @@ TEST_F(SourceBufferStreamTest, Remove_CurrentPosition) {
CheckExpectedBuffers("210K 240 270K 300 330");
}
+// Test behavior when buffers in the selected range before the current position
+// are removed.
+TEST_F(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
+ Seek(0);
+ NewSegmentAppend("0K 30 60 90K 120 150 180K 210 240 270K 300 330");
+ CheckExpectedRangesByTimestamp("{ [0,360) }");
+ CheckExpectedBuffers("0K 30 60 90K 120");
+
+ // Remove a range that is before the current playback position.
+ RemoveInMs(0, 90, 360);
+ CheckExpectedRangesByTimestamp("{ [90,360) }");
+
+ CheckExpectedBuffers("150 180K 210 240 270K 300 330");
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index 3038d3e737c..c41164b60bb 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -10,9 +10,13 @@
#include "base/strings/string_util.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
+#include "media/mp3/mp3_stream_parser.h"
#include "media/webm/webm_stream_parser.h"
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
+#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
+#include "media/mp2t/mp2t_stream_parser.h"
+#endif
#include "media/mp4/es_descriptor.h"
#include "media/mp4/mp4_stream_parser.h"
#endif
@@ -20,7 +24,7 @@
namespace media {
typedef bool (*CodecIDValidatorFunction)(
- const std::string& codecs_id, const media::LogCB& log_cb);
+ const std::string& codecs_id, const LogCB& log_cb);
struct CodecInfo {
enum Type {
@@ -28,6 +32,8 @@ struct CodecInfo {
AUDIO,
VIDEO
};
+
+ // Update tools/metrics/histograms/histograms.xml if new values are added.
enum HistogramTag {
HISTOGRAM_UNKNOWN,
HISTOGRAM_VP8,
@@ -37,6 +43,8 @@ struct CodecInfo {
HISTOGRAM_MPEG2AAC,
HISTOGRAM_MPEG4AAC,
HISTOGRAM_EAC3,
+ HISTOGRAM_MP3,
+ HISTOGRAM_OPUS,
HISTOGRAM_MAX // Must be the last entry.
};
@@ -46,9 +54,9 @@ struct CodecInfo {
HistogramTag tag;
};
-typedef media::StreamParser* (*ParserFactoryFunction)(
+typedef StreamParser* (*ParserFactoryFunction)(
const std::vector<std::string>& codecs,
- const media::LogCB& log_cb);
+ const LogCB& log_cb);
struct SupportedTypeInfo {
const char* type;
@@ -62,6 +70,8 @@ static const CodecInfo kVP9CodecInfo = { "vp9", CodecInfo::VIDEO, NULL,
CodecInfo::HISTOGRAM_VP9 };
static const CodecInfo kVorbisCodecInfo = { "vorbis", CodecInfo::AUDIO, NULL,
CodecInfo::HISTOGRAM_VORBIS };
+static const CodecInfo kOpusCodecInfo = { "opus", CodecInfo::AUDIO, NULL,
+ CodecInfo::HISTOGRAM_OPUS };
static const CodecInfo* kVideoWebMCodecs[] = {
&kVP8CodecInfo,
@@ -71,27 +81,29 @@ static const CodecInfo* kVideoWebMCodecs[] = {
&kVP9CodecInfo,
#endif
&kVorbisCodecInfo,
+ &kOpusCodecInfo,
NULL
};
static const CodecInfo* kAudioWebMCodecs[] = {
&kVorbisCodecInfo,
+ &kOpusCodecInfo,
NULL
};
-static media::StreamParser* BuildWebMParser(
+static StreamParser* BuildWebMParser(
const std::vector<std::string>& codecs,
- const media::LogCB& log_cb) {
- return new media::WebMStreamParser();
+ const LogCB& log_cb) {
+ return new WebMStreamParser();
}
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
// AAC Object Type IDs that Chrome supports.
static const int kAACLCObjectType = 2;
static const int kAACSBRObjectType = 5;
static int GetMP4AudioObjectType(const std::string& codec_id,
- const media::LogCB& log_cb) {
+ const LogCB& log_cb) {
int audio_object_type;
std::vector<std::string> tokens;
if (Tokenize(codec_id, ".", &tokens) != 3 ||
@@ -105,8 +117,7 @@ static int GetMP4AudioObjectType(const std::string& codec_id,
return audio_object_type;
}
-bool ValidateMP4ACodecID(const std::string& codec_id,
- const media::LogCB& log_cb) {
+bool ValidateMP4ACodecID(const std::string& codec_id, const LogCB& log_cb) {
int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
if (audio_object_type == kAACLCObjectType ||
audio_object_type == kAACSBRObjectType) {
@@ -149,9 +160,10 @@ static const CodecInfo* kAudioMP4Codecs[] = {
NULL
};
-static media::StreamParser* BuildMP4Parser(
- const std::vector<std::string>& codecs, const media::LogCB& log_cb) {
+static StreamParser* BuildMP4Parser(
+ const std::vector<std::string>& codecs, const LogCB& log_cb) {
std::set<int> audio_object_types;
+
bool has_sbr = false;
#if defined(ENABLE_EAC3_PLAYBACK)
bool enable_eac3 = CommandLine::ForCurrentProcess()->HasSwitch(
@@ -160,12 +172,12 @@ static media::StreamParser* BuildMP4Parser(
for (size_t i = 0; i < codecs.size(); ++i) {
std::string codec_id = codecs[i];
if (MatchPattern(codec_id, kMPEG2AACLCCodecInfo.pattern)) {
- audio_object_types.insert(media::mp4::kISO_13818_7_AAC_LC);
+ audio_object_types.insert(mp4::kISO_13818_7_AAC_LC);
} else if (MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern)) {
int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
DCHECK_GT(audio_object_type, 0);
- audio_object_types.insert(media::mp4::kISO_14496_3);
+ audio_object_types.insert(mp4::kISO_14496_3);
if (audio_object_type == kAACSBRObjectType) {
has_sbr = true;
@@ -173,21 +185,53 @@ static media::StreamParser* BuildMP4Parser(
}
#if defined(ENABLE_EAC3_PLAYBACK)
} else if (enable_eac3 && MatchPattern(codec_id, kEAC3CodecInfo.pattern)) {
- audio_object_types.insert(media::mp4::kEAC3);
+ audio_object_types.insert(mp4::kEAC3);
#endif
}
}
- return new media::mp4::MP4StreamParser(audio_object_types, has_sbr);
+ return new mp4::MP4StreamParser(audio_object_types, has_sbr);
+}
+
+static const CodecInfo kMP3CodecInfo = { NULL, CodecInfo::AUDIO, NULL,
+ CodecInfo::HISTOGRAM_MP3 };
+
+static const CodecInfo* kAudioMP3Codecs[] = {
+ &kMP3CodecInfo,
+ NULL
+};
+
+static StreamParser* BuildMP3Parser(
+ const std::vector<std::string>& codecs, const LogCB& log_cb) {
+ return new MP3StreamParser();
+}
+
+#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
+static const CodecInfo* kVideoMP2TCodecs[] = {
+ &kH264CodecInfo,
+ &kMPEG4AACCodecInfo,
+ &kMPEG2AACLCCodecInfo,
+ NULL
+};
+
+static StreamParser* BuildMP2TParser(
+ const std::vector<std::string>& codecs, const media::LogCB& log_cb) {
+ return new media::mp2t::Mp2tStreamParser();
}
#endif
+#endif
+
static const SupportedTypeInfo kSupportedTypeInfo[] = {
{ "video/webm", &BuildWebMParser, kVideoWebMCodecs },
{ "audio/webm", &BuildWebMParser, kAudioWebMCodecs },
-#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+#if defined(USE_PROPRIETARY_CODECS)
+ { "audio/mpeg", &BuildMP3Parser, kAudioMP3Codecs },
{ "video/mp4", &BuildMP4Parser, kVideoMP4Codecs },
{ "audio/mp4", &BuildMP4Parser, kAudioMP4Codecs },
+#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
+ { "video/mp2t", &BuildMP2TParser, kVideoMP2TCodecs },
+#endif
#endif
};
@@ -213,6 +257,11 @@ static bool VerifyCodec(
return false;
}
#endif
+ if (codec_info->tag == CodecInfo::HISTOGRAM_OPUS) {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (!cmd_line->HasSwitch(switches::kEnableOpusPlayback))
+ return false;
+ }
if (audio_codecs)
audio_codecs->push_back(codec_info->tag);
return true;
@@ -245,16 +294,40 @@ static bool VerifyCodec(
static bool CheckTypeAndCodecs(
const std::string& type,
const std::vector<std::string>& codecs,
- const media::LogCB& log_cb,
+ const LogCB& log_cb,
ParserFactoryFunction* factory_function,
std::vector<CodecInfo::HistogramTag>* audio_codecs,
std::vector<CodecInfo::HistogramTag>* video_codecs) {
- DCHECK_GT(codecs.size(), 0u);
// Search for the SupportedTypeInfo for |type|.
for (size_t i = 0; i < arraysize(kSupportedTypeInfo); ++i) {
const SupportedTypeInfo& type_info = kSupportedTypeInfo[i];
if (type == type_info.type) {
+ if (codecs.empty()) {
+
+#if defined(USE_PROPRIETARY_CODECS)
+ if (type_info.codecs == kAudioMP3Codecs &&
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableMP3StreamParser)) {
+ DVLOG(1) << "MP3StreamParser is not enabled.";
+ return false;
+ }
+#endif
+
+ const CodecInfo* codec_info = type_info.codecs[0];
+ if (codec_info && !codec_info->pattern &&
+ VerifyCodec(codec_info, audio_codecs, video_codecs)) {
+
+ if (factory_function)
+ *factory_function = type_info.factory_function;
+ return true;
+ }
+
+ MEDIA_LOG(log_cb) << "A codecs parameter must be provided for '"
+ << type << "'";
+ return false;
+ }
+
// Make sure all the codecs specified in |codecs| are
// in the supported type info.
for (size_t j = 0; j < codecs.size(); ++j) {
@@ -270,6 +343,7 @@ static bool CheckTypeAndCodecs(
break; // Since only 1 pattern will match, no need to check others.
}
}
+
if (!found_codec) {
MEDIA_LOG(log_cb) << "Codec '" << codec_id
<< "' is not supported for '" << type << "'";
@@ -291,16 +365,16 @@ static bool CheckTypeAndCodecs(
bool StreamParserFactory::IsTypeSupported(
const std::string& type, const std::vector<std::string>& codecs) {
- return CheckTypeAndCodecs(type, codecs, media::LogCB(), NULL, NULL, NULL);
+ return CheckTypeAndCodecs(type, codecs, LogCB(), NULL, NULL, NULL);
}
-scoped_ptr<media::StreamParser> StreamParserFactory::Create(
+scoped_ptr<StreamParser> StreamParserFactory::Create(
const std::string& type,
const std::vector<std::string>& codecs,
- const media::LogCB& log_cb,
+ const LogCB& log_cb,
bool* has_audio,
bool* has_video) {
- scoped_ptr<media::StreamParser> stream_parser;
+ scoped_ptr<StreamParser> stream_parser;
ParserFactoryFunction factory_function;
std::vector<CodecInfo::HistogramTag> audio_codecs;
std::vector<CodecInfo::HistogramTag> video_codecs;
diff --git a/chromium/media/filters/stream_parser_factory.h b/chromium/media/filters/stream_parser_factory.h
index ccf394150bc..1f9ad347d12 100644
--- a/chromium/media/filters/stream_parser_factory.h
+++ b/chromium/media/filters/stream_parser_factory.h
@@ -32,7 +32,7 @@ class MEDIA_EXPORT StreamParserFactory {
// |has_video| is true if a video codec was specified.
// Returns NULL otherwise. The values of |has_audio| and |has_video| are
// undefined.
- static scoped_ptr<media::StreamParser> Create(
+ static scoped_ptr<StreamParser> Create(
const std::string& type, const std::vector<std::string>& codecs,
const LogCB& log_cb, bool* has_audio, bool* has_video);
};
diff --git a/chromium/media/filters/video_decoder_selector.cc b/chromium/media/filters/video_decoder_selector.cc
index f75a95dc28c..e961a316497 100644
--- a/chromium/media/filters/video_decoder_selector.cc
+++ b/chromium/media/filters/video_decoder_selector.cc
@@ -28,12 +28,15 @@ VideoDecoderSelector::VideoDecoderSelector(
weak_ptr_factory_(this) {
}
-VideoDecoderSelector::~VideoDecoderSelector() {}
+VideoDecoderSelector::~VideoDecoderSelector() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(select_decoder_cb_.is_null());
+}
void VideoDecoderSelector::SelectVideoDecoder(
DemuxerStream* stream,
const SelectDecoderCB& select_decoder_cb) {
- DVLOG(2) << "SelectVideoDecoder()";
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(stream);
@@ -43,22 +46,20 @@ void VideoDecoderSelector::SelectVideoDecoder(
const VideoDecoderConfig& config = stream->video_decoder_config();
if (!config.IsValidConfig()) {
DLOG(ERROR) << "Invalid video stream config.";
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
input_stream_ = stream;
if (!config.is_encrypted()) {
- InitializeDecoder(decoders_.begin());
+ InitializeDecoder();
return;
}
// This could happen if Encrypted Media Extension (EME) is not enabled.
if (set_decryptor_ready_cb_.is_null()) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
@@ -67,13 +68,46 @@ void VideoDecoderSelector::SelectVideoDecoder(
video_decoder_->Initialize(
input_stream_->video_decoder_config(),
- BindToCurrentLoop(base::Bind(
- &VideoDecoderSelector::DecryptingVideoDecoderInitDone,
- weak_ptr_factory_.GetWeakPtr())));
+ base::Bind(&VideoDecoderSelector::DecryptingVideoDecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+void VideoDecoderSelector::Abort() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // This could happen when SelectVideoDecoder() was not called or when
+ // |select_decoder_cb_| was already posted but not fired (e.g. in the
+ // message loop queue).
+ if (select_decoder_cb_.is_null())
+ return;
+
+ // We must be trying to initialize the |video_decoder_| or the
+  // |decrypted_stream_|. Invalidate all weak pointers so that all initialization
+ // callbacks won't fire.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+
+ if (video_decoder_) {
+ // |decrypted_stream_| is either NULL or already initialized. We don't
+ // need to Reset() |decrypted_stream_| in either case.
+ video_decoder_->Stop(base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
+ weak_ptr_factory_.GetWeakPtr()));
+ return;
+ }
+
+ if (decrypted_stream_) {
+ decrypted_stream_->Reset(
+ base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
+ weak_ptr_factory_.GetWeakPtr()));
+ return;
+ }
+
+ NOTREACHED();
}
void VideoDecoderSelector::DecryptingVideoDecoderInitDone(
PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status == PIPELINE_OK) {
@@ -82,63 +116,69 @@ void VideoDecoderSelector::DecryptingVideoDecoderInitDone(
return;
}
+ video_decoder_.reset();
+
decrypted_stream_.reset(new DecryptingDemuxerStream(
message_loop_, set_decryptor_ready_cb_));
decrypted_stream_->Initialize(
input_stream_,
- BindToCurrentLoop(base::Bind(
- &VideoDecoderSelector::DecryptingDemuxerStreamInitDone,
- weak_ptr_factory_.GetWeakPtr())));
+ base::Bind(&VideoDecoderSelector::DecryptingDemuxerStreamInitDone,
+ weak_ptr_factory_.GetWeakPtr()));
}
void VideoDecoderSelector::DecryptingDemuxerStreamInitDone(
PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status != PIPELINE_OK) {
- decrypted_stream_.reset();
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
DCHECK(!decrypted_stream_->video_decoder_config().is_encrypted());
input_stream_ = decrypted_stream_.get();
- InitializeDecoder(decoders_.begin());
+ InitializeDecoder();
}
-void VideoDecoderSelector::InitializeDecoder(
- ScopedVector<VideoDecoder>::iterator iter) {
+void VideoDecoderSelector::InitializeDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!video_decoder_);
- if (iter == decoders_.end()) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ if (decoders_.empty()) {
+ ReturnNullDecoder();
return;
}
- (*iter)->Initialize(
- input_stream_->video_decoder_config(),
- BindToCurrentLoop(base::Bind(&VideoDecoderSelector::DecoderInitDone,
- weak_ptr_factory_.GetWeakPtr(),
- iter)));
+ video_decoder_.reset(decoders_.front());
+ decoders_.weak_erase(decoders_.begin());
+
+ video_decoder_->Initialize(input_stream_->video_decoder_config(),
+ base::Bind(&VideoDecoderSelector::DecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()));
}
-void VideoDecoderSelector::DecoderInitDone(
- ScopedVector<VideoDecoder>::iterator iter, PipelineStatus status) {
+void VideoDecoderSelector::DecoderInitDone(PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status != PIPELINE_OK) {
- InitializeDecoder(++iter);
+ video_decoder_.reset();
+ InitializeDecoder();
return;
}
- scoped_ptr<VideoDecoder> video_decoder(*iter);
- decoders_.weak_erase(iter);
-
- base::ResetAndReturn(&select_decoder_cb_).Run(video_decoder.Pass(),
+ base::ResetAndReturn(&select_decoder_cb_).Run(video_decoder_.Pass(),
decrypted_stream_.Pass());
}
+void VideoDecoderSelector::ReturnNullDecoder() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ base::ResetAndReturn(&select_decoder_cb_).Run(
+ scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+}
+
} // namespace media
diff --git a/chromium/media/filters/video_decoder_selector.h b/chromium/media/filters/video_decoder_selector.h
index 105b51372dc..90e0dd51a49 100644
--- a/chromium/media/filters/video_decoder_selector.h
+++ b/chromium/media/filters/video_decoder_selector.h
@@ -57,12 +57,16 @@ class MEDIA_EXPORT VideoDecoderSelector {
void SelectVideoDecoder(DemuxerStream* stream,
const SelectDecoderCB& select_decoder_cb);
+ // Aborts pending VideoDecoder selection and fires |select_decoder_cb| with
+ // NULL and NULL immediately if it's pending.
+ void Abort();
+
private:
void DecryptingVideoDecoderInitDone(PipelineStatus status);
void DecryptingDemuxerStreamInitDone(PipelineStatus status);
- void InitializeDecoder(ScopedVector<VideoDecoder>::iterator iter);
- void DecoderInitDone(ScopedVector<VideoDecoder>::iterator iter,
- PipelineStatus status);
+ void InitializeDecoder();
+ void DecoderInitDone(PipelineStatus status);
+ void ReturnNullDecoder();
scoped_refptr<base::MessageLoopProxy> message_loop_;
ScopedVector<VideoDecoder> decoders_;
diff --git a/chromium/media/filters/video_decoder_selector_unittest.cc b/chromium/media/filters/video_decoder_selector_unittest.cc
index f42c583f3a5..ddb53bc315d 100644
--- a/chromium/media/filters/video_decoder_selector_unittest.cc
+++ b/chromium/media/filters/video_decoder_selector_unittest.cc
@@ -26,6 +26,10 @@ class VideoDecoderSelectorTest : public ::testing::Test {
public:
enum DecryptorCapability {
kNoDecryptor,
+ // Used to test Abort() during DecryptingVideoDecoder::Initialize() and
+ // DecryptingDemuxerStream::Initialize(). We don't need this for normal
+ // VideoDecoders since we use MockVideoDecoder.
+ kHoldSetDecryptor,
kDecryptOnly,
kDecryptAndDecode
};
@@ -76,12 +80,14 @@ class VideoDecoderSelectorTest : public ::testing::Test {
void InitializeDecoderSelector(DecryptorCapability decryptor_capability,
int num_decoders) {
SetDecryptorReadyCB set_decryptor_ready_cb;
+ if (decryptor_capability != kNoDecryptor) {
+ set_decryptor_ready_cb =
+ base::Bind(&VideoDecoderSelectorTest::SetDecryptorReadyCallback,
+ base::Unretained(this));
+ }
+
if (decryptor_capability == kDecryptOnly ||
decryptor_capability == kDecryptAndDecode) {
- set_decryptor_ready_cb = base::Bind(
- &VideoDecoderSelectorTest::SetDecryptorReadyCallback,
- base::Unretained(this));
-
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
.WillRepeatedly(RunCallback<0>(decryptor_.get()));
@@ -92,6 +98,10 @@ class VideoDecoderSelectorTest : public ::testing::Test {
EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
.WillRepeatedly(RunCallback<1>(true));
}
+ } else if (decryptor_capability == kHoldSetDecryptor) {
+ // Set and cancel DecryptorReadyCB but the callback is never fired.
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .Times(2);
}
DCHECK_GE(all_decoders_.size(), static_cast<size_t>(num_decoders));
@@ -112,6 +122,14 @@ class VideoDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
+ void SelectDecoderAndAbort() {
+ SelectDecoder();
+
+ EXPECT_CALL(*this, OnDecoderSelected(IsNull(), IsNull()));
+ decoder_selector_->Abort();
+ message_loop_.RunUntilIdle();
+ }
+
// Fixture members.
scoped_ptr<VideoDecoderSelector> decoder_selector_;
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
@@ -154,6 +172,16 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest,
+ Abort_ClearStream_NoDecryptor_OneClearDecoder) {
+ UseClearStream();
+ InitializeDecoderSelector(kNoDecryptor, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _));
+
+ SelectDecoderAndAbort();
+}
+
// The stream is not encrypted and we have multiple clear decoders. The first
// decoder that can decode the input stream will be selected.
TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
@@ -169,6 +197,18 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest,
+ Abort_ClearStream_NoDecryptor_MultipleClearDecoder) {
+ UseClearStream();
+ InitializeDecoderSelector(kNoDecryptor, 2);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _))
+ .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _));
+
+ SelectDecoderAndAbort();
+}
+
// There is a decryptor but the stream is not encrypted. The decoder will be
// selected.
TEST_F(VideoDecoderSelectorTest, ClearStream_HasDecryptor) {
@@ -182,6 +222,15 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_HasDecryptor) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
+ UseClearStream();
+ InitializeDecoderSelector(kDecryptOnly, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _));
+
+ SelectDecoderAndAbort();
+}
+
// The stream is encrypted and there's no decryptor. No decoder can be selected.
TEST_F(VideoDecoderSelectorTest, EncryptedStream_NoDecryptor) {
UseEncryptedStream();
@@ -203,6 +252,14 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_NoClearDecoder) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_NoClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kHoldSetDecryptor, 0);
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can do decryption-only and there's a decoder available. The decoder
// will be selected and a DecryptingDemuxerStream will be created.
TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
@@ -216,6 +273,16 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_OneClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kDecryptOnly, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _));
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can only do decryption and there are multiple decoders available.
// The first decoder that can decode the input stream will be selected and
// a DecryptingDemuxerStream will be created.
@@ -233,6 +300,18 @@ TEST_F(VideoDecoderSelectorTest,
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kDecryptOnly, 2);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _))
+ .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _));
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can do decryption and decoding. A DecryptingVideoDecoder will be
// created and selected. The clear decoders should not be touched at all.
// No DecryptingDemuxerStream should to be created.
@@ -245,4 +324,11 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
SelectDecoder();
}
+TEST_F(VideoDecoderSelectorTest, Abort_EncryptedStream_DecryptAndDecode) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kHoldSetDecryptor, 1);
+
+ SelectDecoderAndAbort();
+}
+
} // namespace media
diff --git a/chromium/media/filters/video_frame_stream.cc b/chromium/media/filters/video_frame_stream.cc
index 73b136c8b79..80e59371492 100644
--- a/chromium/media/filters/video_frame_stream.cc
+++ b/chromium/media/filters/video_frame_stream.cc
@@ -28,7 +28,8 @@ VideoFrameStream::VideoFrameStream(
stream_(NULL),
decoder_selector_(new VideoDecoderSelector(message_loop,
decoders.Pass(),
- set_decryptor_ready_cb)) {}
+ set_decryptor_ready_cb)) {
+}
VideoFrameStream::~VideoFrameStream() {
DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_STOPPED) << state_;
@@ -37,6 +38,7 @@ VideoFrameStream::~VideoFrameStream() {
void VideoFrameStream::Initialize(DemuxerStream* stream,
const StatisticsCB& statistics_cb,
const InitCB& init_cb) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
DCHECK(init_cb_.is_null());
@@ -55,6 +57,7 @@ void VideoFrameStream::Initialize(DemuxerStream* stream,
}
void VideoFrameStream::Read(const ReadCB& read_cb) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
state_ == STATE_ERROR) << state_;
@@ -81,6 +84,7 @@ void VideoFrameStream::Read(const ReadCB& read_cb) {
}
void VideoFrameStream::Reset(const base::Closure& closure) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
DCHECK(reset_cb_.is_null());
@@ -113,16 +117,20 @@ void VideoFrameStream::Reset(const base::Closure& closure) {
}
void VideoFrameStream::Stop(const base::Closure& closure) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_NE(state_, STATE_STOPPED) << state_;
DCHECK(stop_cb_.is_null());
stop_cb_ = closure;
+ if (state_ == STATE_INITIALIZING) {
+ decoder_selector_->Abort();
+ return;
+ }
+
// The stopping process will continue after the pending operation is finished.
- // TODO(xhwang): Now we cannot stop the initialization process through
- // VideoDecoderSelector. Fix this. See: http://crbug.com/222054
- if (state_ == STATE_INITIALIZING || state_ == STATE_PENDING_DEMUXER_READ)
+ if (state_ == STATE_PENDING_DEMUXER_READ)
return;
// VideoDecoder API guarantees that if VideoDecoder::Stop() is called during
@@ -157,6 +165,7 @@ bool VideoFrameStream::CanReadWithoutStalling() const {
void VideoFrameStream::OnDecoderSelected(
scoped_ptr<VideoDecoder> selected_decoder,
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
DCHECK(!init_cb_.is_null());
@@ -199,6 +208,7 @@ void VideoFrameStream::AbortRead() {
}
void VideoFrameStream::Decode(const scoped_refptr<DecoderBuffer>& buffer) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
DCHECK(!read_cb_.is_null());
DCHECK(reset_cb_.is_null());
@@ -217,6 +227,7 @@ void VideoFrameStream::FlushDecoder() {
void VideoFrameStream::OnFrameReady(int buffer_size,
const VideoDecoder::Status status,
const scoped_refptr<VideoFrame>& frame) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
DCHECK(!read_cb_.is_null());
@@ -268,6 +279,7 @@ void VideoFrameStream::OnFrameReady(int buffer_size,
}
void VideoFrameStream::ReadFromDemuxerStream() {
+ DVLOG(2) << __FUNCTION__;
DCHECK_EQ(state_, STATE_NORMAL) << state_;
DCHECK(!read_cb_.is_null());
DCHECK(reset_cb_.is_null());
@@ -280,6 +292,7 @@ void VideoFrameStream::ReadFromDemuxerStream() {
void VideoFrameStream::OnBufferReady(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_PENDING_DEMUXER_READ) << state_;
DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
@@ -326,6 +339,7 @@ void VideoFrameStream::OnBufferReady(
}
void VideoFrameStream::ReinitializeDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_FLUSHING_DECODER) << state_;
@@ -337,6 +351,7 @@ void VideoFrameStream::ReinitializeDecoder() {
}
void VideoFrameStream::OnDecoderReinitialized(PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
@@ -373,6 +388,7 @@ void VideoFrameStream::OnDecoderReinitialized(PipelineStatus status) {
}
void VideoFrameStream::ResetDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
state_ == STATE_ERROR) << state_;
@@ -382,6 +398,7 @@ void VideoFrameStream::ResetDecoder() {
}
void VideoFrameStream::OnDecoderReset() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
state_ == STATE_ERROR) << state_;
@@ -400,6 +417,7 @@ void VideoFrameStream::OnDecoderReset() {
}
void VideoFrameStream::StopDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
DCHECK(!stop_cb_.is_null());
@@ -408,6 +426,7 @@ void VideoFrameStream::StopDecoder() {
}
void VideoFrameStream::OnDecoderStopped() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
// If Stop() was called during pending read/reset, read/reset callback should
diff --git a/chromium/media/filters/video_frame_stream_unittest.cc b/chromium/media/filters/video_frame_stream_unittest.cc
index 524f6aa6daf..e57510563f9 100644
--- a/chromium/media/filters/video_frame_stream_unittest.cc
+++ b/chromium/media/filters/video_frame_stream_unittest.cc
@@ -36,6 +36,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
decoder_(new FakeVideoDecoder(kDecodingDelay)),
is_initialized_(false),
num_decoded_frames_(0),
+ pending_initialize_(false),
pending_read_(false),
pending_reset_(false),
pending_stop_(false),
@@ -49,9 +50,6 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
base::Bind(&VideoFrameStreamTest::SetDecryptorReadyCallback,
base::Unretained(this))));
- EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
- .WillRepeatedly(RunCallback<0>(decryptor_.get()));
-
// Decryptor can only decrypt (not decrypt-and-decode) so that
// DecryptingDemuxerStream will be used.
EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
@@ -61,26 +59,43 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
~VideoFrameStreamTest() {
+ DCHECK(!pending_initialize_);
DCHECK(!pending_read_);
DCHECK(!pending_reset_);
DCHECK(!pending_stop_);
- // Check that the pipeline statistics callback was fired correctly.
- if (decoder_)
- EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
-
if (is_initialized_)
Stop();
EXPECT_FALSE(is_initialized_);
}
MOCK_METHOD1(SetDecryptorReadyCallback, void(const media::DecryptorReadyCB&));
- MOCK_METHOD2(OnInitialized, void(bool, bool));
void OnStatistics(const PipelineStatistics& statistics) {
total_bytes_decoded_ += statistics.video_bytes_decoded;
}
+ void OnInitialized(bool success, bool has_alpha) {
+ DCHECK(!pending_read_);
+ DCHECK(!pending_reset_);
+ DCHECK(pending_initialize_);
+ pending_initialize_ = false;
+
+ is_initialized_ = success;
+ if (!success)
+ decoder_ = NULL;
+ }
+
+ void InitializeVideoFrameStream() {
+ pending_initialize_ = true;
+ video_frame_stream_->Initialize(
+ demuxer_stream_.get(),
+ base::Bind(&VideoFrameStreamTest::OnStatistics, base::Unretained(this)),
+ base::Bind(&VideoFrameStreamTest::OnInitialized,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
// Fake Decrypt() function used by DecryptingDemuxerStream. It does nothing
// but removes the DecryptConfig to make the buffer unencrypted.
void Decrypt(Decryptor::StreamType stream_type,
@@ -120,6 +135,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
DCHECK(pending_stop_);
pending_stop_ = false;
is_initialized_ = false;
+ decoder_ = NULL;
}
void ReadUntilPending() {
@@ -136,6 +152,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
NOT_PENDING,
DEMUXER_READ_NORMAL,
DEMUXER_READ_CONFIG_CHANGE,
+ SET_DECRYPTOR,
DECODER_INIT,
DECODER_REINIT,
DECODER_READ,
@@ -156,15 +173,19 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
ReadUntilPending();
break;
+ case SET_DECRYPTOR:
+ // Hold DecryptorReadyCB.
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .Times(2);
+ // Initialize will fail because no decryptor is available.
+ InitializeVideoFrameStream();
+ break;
+
case DECODER_INIT:
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .WillRepeatedly(RunCallback<0>(decryptor_.get()));
decoder_->HoldNextInit();
- video_frame_stream_->Initialize(
- demuxer_stream_.get(),
- base::Bind(&VideoFrameStreamTest::OnStatistics,
- base::Unretained(this)),
- base::Bind(&VideoFrameStreamTest::OnInitialized,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
+ InitializeVideoFrameStream();
break;
case DECODER_REINIT:
@@ -187,6 +208,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
case DECODER_STOP:
decoder_->HoldNextStop();
+ // Check that the pipeline statistics callback was fired correctly.
+ EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
pending_stop_ = true;
video_frame_stream_->Stop(base::Bind(&VideoFrameStreamTest::OnStopped,
base::Unretained(this)));
@@ -207,9 +230,13 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
demuxer_stream_->SatisfyRead();
break;
+ case SET_DECRYPTOR:
+ // VideoFrameStream::Stop() does not wait for pending DecryptorReadyCB.
+ // Therefore there's no need to satisfy a callback.
+ NOTREACHED();
+ break;
+
case DECODER_INIT:
- EXPECT_CALL(*this, OnInitialized(true, false))
- .WillOnce(SaveArg<0>(&is_initialized_));
decoder_->SatisfyInit();
break;
@@ -238,8 +265,6 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
message_loop_.RunUntilIdle();
- if (!is_initialized_)
- decoder_ = NULL;
}
void Initialize() {
@@ -273,6 +298,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
bool is_initialized_;
int num_decoded_frames_;
+ bool pending_initialize_;
bool pending_read_;
bool pending_reset_;
bool pending_stop_;
@@ -389,6 +415,19 @@ TEST_P(VideoFrameStreamTest, Stop_BeforeInitialization) {
message_loop_.RunUntilIdle();
}
+TEST_P(VideoFrameStreamTest, Stop_DuringSetDecryptor) {
+ if (!GetParam()) {
+ DVLOG(1) << "SetDecryptor test only runs when the stream is encrytped.";
+ return;
+ }
+
+ EnterPendingState(SET_DECRYPTOR);
+ pending_stop_ = true;
+ video_frame_stream_->Stop(
+ base::Bind(&VideoFrameStreamTest::OnStopped, base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+}
+
TEST_P(VideoFrameStreamTest, Stop_DuringInitialization) {
EnterPendingState(DECODER_INIT);
EnterPendingState(DECODER_STOP);
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index 3f125943ef0..3c02d15f906 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -121,7 +121,7 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
bool can_handle = false;
if (config.codec() == kCodecVP9)
can_handle = true;
- if (cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback) &&
+ if (!cmd_line->HasSwitch(switches::kDisableVp8AlphaPlayback) &&
config.codec() == kCodecVP8 && config.format() == VideoFrame::YV12A) {
can_handle = true;
}
diff --git a/chromium/media/filters/wsola_internals.cc b/chromium/media/filters/wsola_internals.cc
new file mode 100644
index 00000000000..45cdd8ffad5
--- /dev/null
+++ b/chromium/media/filters/wsola_internals.cc
@@ -0,0 +1,264 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "media/filters/wsola_internals.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+namespace internal {
+
+bool InInterval(int n, Interval q) {
+ return n >= q.first && n <= q.second;
+}
+
+float MultiChannelSimilarityMeasure(const float* dot_prod_a_b,
+ const float* energy_a,
+ const float* energy_b,
+ int channels) {
+ const float kEpsilon = 1e-12f;
+ float similarity_measure = 0.0f;
+ for (int n = 0; n < channels; ++n) {
+ similarity_measure += dot_prod_a_b[n] / sqrt(energy_a[n] * energy_b[n] +
+ kEpsilon);
+ }
+ return similarity_measure;
+}
+
+void MultiChannelDotProduct(const AudioBus* a,
+ int frame_offset_a,
+ const AudioBus* b,
+ int frame_offset_b,
+ int num_frames,
+ float* dot_product) {
+ DCHECK_EQ(a->channels(), b->channels());
+ DCHECK_GE(frame_offset_a, 0);
+ DCHECK_GE(frame_offset_b, 0);
+ DCHECK_LE(frame_offset_a + num_frames, a->frames());
+ DCHECK_LE(frame_offset_b + num_frames, b->frames());
+
+ memset(dot_product, 0, sizeof(*dot_product) * a->channels());
+ for (int k = 0; k < a->channels(); ++k) {
+ const float* ch_a = a->channel(k) + frame_offset_a;
+ const float* ch_b = b->channel(k) + frame_offset_b;
+ for (int n = 0; n < num_frames; ++n) {
+ dot_product[k] += *ch_a++ * *ch_b++;
+ }
+ }
+}
+
+void MultiChannelMovingBlockEnergies(const AudioBus* input,
+ int frames_per_block,
+ float* energy) {
+ int num_blocks = input->frames() - (frames_per_block - 1);
+ int channels = input->channels();
+
+ for (int k = 0; k < input->channels(); ++k) {
+ const float* input_channel = input->channel(k);
+
+ energy[k] = 0;
+
+ // First block of channel |k|.
+ for (int m = 0; m < frames_per_block; ++m) {
+ energy[k] += input_channel[m] * input_channel[m];
+ }
+
+ const float* slide_out = input_channel;
+ const float* slide_in = input_channel + frames_per_block;
+ for (int n = 1; n < num_blocks; ++n, ++slide_in, ++slide_out) {
+ energy[k + n * channels] = energy[k + (n - 1) * channels] - *slide_out *
+ *slide_out + *slide_in * *slide_in;
+ }
+ }
+}
+
+// Fit the curve f(x) = a * x^2 + b * x + c such that
+// f(-1) = |y[0]|
+// f(0) = |y[1]|
+// f(1) = |y[2]|.
+void CubicInterpolation(const float* y_values,
+ float* extremum,
+ float* extremum_value) {
+ float a = 0.5f * (y_values[2] + y_values[0]) - y_values[1];
+ float b = 0.5f * (y_values[2] - y_values[0]);
+ float c = y_values[1];
+
+ DCHECK_NE(a, 0);
+ *extremum = -b / (2.f * a);
+ *extremum_value = a * (*extremum) * (*extremum) + b * (*extremum) + c;
+}
+
+int DecimatedSearch(int decimation,
+ Interval exclude_interval,
+ const AudioBus* target_block,
+ const AudioBus* search_segment,
+ const float* energy_target_block,
+ const float* energy_candidate_blocks) {
+ int channels = search_segment->channels();
+ int block_size = target_block->frames();
+ int num_candidate_blocks = search_segment->frames() - (block_size - 1);
+ scoped_ptr<float[]> dot_prod(new float[channels]);
+ float similarity[3]; // Three elements for cubic interpolation.
+
+ int n = 0;
+ MultiChannelDotProduct(target_block, 0, search_segment, n, block_size,
+ dot_prod.get());
+ similarity[0] = MultiChannelSimilarityMeasure(
+ dot_prod.get(), energy_target_block,
+ &energy_candidate_blocks[n * channels], channels);
+
+ // Set the starting point as optimal point.
+ float best_similarity = similarity[0];
+ int optimal_index = 0;
+
+ n += decimation;
+ if (n >= num_candidate_blocks) {
+ return 0;
+ }
+
+ MultiChannelDotProduct(target_block, 0, search_segment, n, block_size,
+ dot_prod.get());
+ similarity[1] = MultiChannelSimilarityMeasure(
+ dot_prod.get(), energy_target_block,
+ &energy_candidate_blocks[n * channels], channels);
+
+ n += decimation;
+ if (n >= num_candidate_blocks) {
+ // We cannot do any more sampling. Compare these two values and return the
+ // optimal index.
+ return similarity[1] > similarity[0] ? decimation : 0;
+ }
+
+ for (; n < num_candidate_blocks; n += decimation) {
+ MultiChannelDotProduct(target_block, 0, search_segment, n, block_size,
+ dot_prod.get());
+
+ similarity[2] = MultiChannelSimilarityMeasure(
+ dot_prod.get(), energy_target_block,
+ &energy_candidate_blocks[n * channels], channels);
+
+ if ((similarity[1] > similarity[0] && similarity[1] >= similarity[2]) ||
+ (similarity[1] >= similarity[0] && similarity[1] > similarity[2])) {
+ // A local maximum is found. Do a cubic interpolation for a better
+ // estimate of candidate maximum.
+ float normalized_candidate_index;
+ float candidate_similarity;
+ CubicInterpolation(similarity, &normalized_candidate_index,
+ &candidate_similarity);
+
+ int candidate_index = n - decimation + static_cast<int>(
+ normalized_candidate_index * decimation + 0.5f);
+ if (candidate_similarity > best_similarity &&
+ !InInterval(candidate_index, exclude_interval)) {
+ optimal_index = candidate_index;
+ best_similarity = candidate_similarity;
+ }
+ } else if (n + decimation >= num_candidate_blocks &&
+ similarity[2] > best_similarity &&
+ !InInterval(n, exclude_interval)) {
+ // If this is the end-point and has a better similarity-measure than
+ // optimal, then we accept it as optimal point.
+ optimal_index = n;
+ best_similarity = similarity[2];
+ }
+ memmove(similarity, &similarity[1], 2 * sizeof(*similarity));
+ }
+ return optimal_index;
+}
+
+int FullSearch(int low_limit,
+ int high_limit,
+ Interval exclude_interval,
+ const AudioBus* target_block,
+ const AudioBus* search_block,
+ const float* energy_target_block,
+ const float* energy_candidate_blocks) {
+ int channels = search_block->channels();
+ int block_size = target_block->frames();
+ scoped_ptr<float[]> dot_prod(new float[channels]);
+
+ float best_similarity = std::numeric_limits<float>::min();
+ int optimal_index = 0;
+
+ for (int n = low_limit; n <= high_limit; ++n) {
+ if (InInterval(n, exclude_interval)) {
+ continue;
+ }
+ MultiChannelDotProduct(target_block, 0, search_block, n, block_size,
+ dot_prod.get());
+
+ float similarity = MultiChannelSimilarityMeasure(
+ dot_prod.get(), energy_target_block,
+ &energy_candidate_blocks[n * channels], channels);
+
+ if (similarity > best_similarity) {
+ best_similarity = similarity;
+ optimal_index = n;
+ }
+ }
+
+ return optimal_index;
+}
+
+int OptimalIndex(const AudioBus* search_block,
+ const AudioBus* target_block,
+ Interval exclude_interval) {
+ int channels = search_block->channels();
+ DCHECK_EQ(channels, target_block->channels());
+ int target_size = target_block->frames();
+ int num_candidate_blocks = search_block->frames() - (target_size - 1);
+
+ // This is a compromise between complexity reduction and search accuracy. I
+ // don't have a proof that down sample of order 5 is optimal. One can compute
+ // a decimation factor that minimizes complexity given the size of
+ // |search_block| and |target_block|. However, my experiments show the rate of
+ // missing the optimal index is significant. This value is chosen
+ // heuristically based on experiments.
+ const int kSearchDecimation = 5;
+
+ scoped_ptr<float[]> energy_target_block(new float[channels]);
+ scoped_ptr<float[]> energy_candidate_blocks(
+ new float[channels * num_candidate_blocks]);
+
+  // Energy of all candidate frames.
+ MultiChannelMovingBlockEnergies(search_block, target_size,
+ energy_candidate_blocks.get());
+
+ // Energy of target frame.
+ MultiChannelDotProduct(target_block, 0, target_block, 0,
+ target_size, energy_target_block.get());
+
+ int optimal_index = DecimatedSearch(kSearchDecimation,
+ exclude_interval, target_block,
+ search_block, energy_target_block.get(),
+ energy_candidate_blocks.get());
+
+ int lim_low = std::max(0, optimal_index - kSearchDecimation);
+ int lim_high = std::min(num_candidate_blocks - 1,
+ optimal_index + kSearchDecimation);
+ return FullSearch(lim_low, lim_high, exclude_interval, target_block,
+ search_block, energy_target_block.get(),
+ energy_candidate_blocks.get());
+}
+
+void GetSymmetricHanningWindow(int window_length, float* window) {
+ const float scale = 2.0f * M_PI / window_length;
+ for (int n = 0; n < window_length; ++n)
+ window[n] = 0.5f * (1.0f - cosf(n * scale));
+}
+
+} // namespace internal
+
+} // namespace media
+
diff --git a/chromium/media/filters/wsola_internals.h b/chromium/media/filters/wsola_internals.h
new file mode 100644
index 00000000000..55fff04d30b
--- /dev/null
+++ b/chromium/media/filters/wsola_internals.h
@@ -0,0 +1,93 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A set of utility functions to perform WSOLA.
+
+#ifndef MEDIA_FILTERS_WSOLA_INTERNALS_H_
+#define MEDIA_FILTERS_WSOLA_INTERNALS_H_
+
+#include <utility>
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+
+namespace internal {
+
+typedef std::pair<int, int> Interval;
+
+// Dot-product of channels of two AudioBus. For each AudioBus an offset is
+// given. |dot_product[k]| is the dot-product of channel |k|. The caller should
+// allocate sufficient space for |dot_product|.
+MEDIA_EXPORT void MultiChannelDotProduct(const AudioBus* a,
+ int frame_offset_a,
+ const AudioBus* b,
+ int frame_offset_b,
+ int num_frames,
+ float* dot_product);
+
+// Energies of sliding windows of channels are interleaved.
+// The number of windows is |input->frames()| - (|frames_per_window| - 1), hence,
+// the method assumes |energy| must be, at least, of size
+// (|input->frames()| - (|frames_per_window| - 1)) * |input->channels()|.
+MEDIA_EXPORT void MultiChannelMovingBlockEnergies(const AudioBus* input,
+ int frames_per_window,
+ float* energy);
+
+// Fit the curve f(x) = a * x^2 + b * x + c such that
+//
+// f(-1) = |y[0]|
+// f(0) = |y[1]|
+// f(1) = |y[2]|.
+//
+// Then compute the |extremum| point -b / (2*a) and |extremum_value|
+// b^2 / (4*a) - b^2 / (2*a) + c.
+//
+// It is not expected that this function is called with
+// y[0] == y[1] == y[2].
+MEDIA_EXPORT void CubicInterpolation(const float* y_values,
+ float* extremum,
+ float* extremum_value);
+
+// Search a subset of all candidate blocks. The search is performed every
+// |decimation| frames. This reduces complexity by a factor of about
+// 1 / |decimation|. A cubic interpolation is used to have a better estimate of
+// the best match.
+MEDIA_EXPORT int DecimatedSearch(int decimation,
+ Interval exclude_interval,
+ const AudioBus* target_block,
+ const AudioBus* search_segment,
+ const float* energy_target_block,
+ const float* energy_candid_blocks);
+
+// Search [|low_limit|, |high_limit|] of |search_block| to find a block that
+// is most similar to |target_block|. |energy_target_block| is the energy of the
+// |target_block|. |energy_candidate_blocks| is the energy of all blocks within
+// |search_block|.
+MEDIA_EXPORT int FullSearch(int low_limit,
+ int hight_limimit,
+ Interval exclude_interval,
+ const AudioBus* target_block,
+ const AudioBus* search_block,
+ const float* energy_target_block,
+ const float* energy_candidate_blocks);
+
+// Find the index of the block, within |search_block|, that is most similar
+// to |target_block|. Obviously, the returned index is w.r.t. |search_block|.
+// |exclude_interval| is an interval that is excluded from the search.
+MEDIA_EXPORT int OptimalIndex(const AudioBus* search_block,
+ const AudioBus* target_block,
+ Interval exclude_interval);
+
+// Return a "periodic" Hann window. This is the first L samples of an L+1
+// Hann window. It provides perfect reconstruction for overlap-and-add.
+MEDIA_EXPORT void GetSymmetricHanningWindow(int window_length, float* window);
+
+} // namespace internal
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_WSOLA_INTERNALS_H_
diff --git a/chromium/media/media.gyp b/chromium/media/media.gyp
index c685090fd17..09deb821b22 100644
--- a/chromium/media/media.gyp
+++ b/chromium/media/media.gyp
@@ -12,12 +12,12 @@
# detection of ABI mismatches and prevents silent errors.
'linux_link_pulseaudio%': 0,
'conditions': [
- ['OS=="android" or OS=="ios"', {
- # Android and iOS don't use ffmpeg.
+ ['OS=="android"', {
+ # Android doesn't use ffmpeg.
'media_use_ffmpeg%': 0,
- # Android and iOS don't use libvpx.
+ # Android doesn't use libvpx.
'media_use_libvpx%': 0,
- }, { # 'OS!="android" and OS!="ios"'
+ }, { # 'OS!="android"'
'media_use_ffmpeg%': 1,
'media_use_libvpx%': 1,
}],
@@ -27,24 +27,31 @@
}, {
'use_alsa%': 0,
}],
- ['os_posix==1 and OS!="mac" and OS!="ios" and OS!="android" and chromeos!=1', {
+ ['os_posix==1 and OS!="mac" and OS!="android" and chromeos!=1', {
'use_pulseaudio%': 1,
}, {
'use_pulseaudio%': 0,
}],
],
},
+ 'includes': [
+ 'media_cdm.gypi',
+ ],
'targets': [
{
'target_name': 'media',
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../crypto/crypto.gyp:crypto',
+ '../net/net.gyp:net',
+ '../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../third_party/opus/opus.gyp:opus',
'../ui/ui.gyp:ui',
'../url/url.gyp:url_lib',
+ 'shared_memory_support',
],
'defines': [
'MEDIA_IMPLEMENTATION',
@@ -60,9 +67,7 @@
'audio/android/opensles_input.h',
'audio/android/opensles_output.cc',
'audio/android/opensles_output.h',
- 'audio/async_socket_io_handler.h',
- 'audio/async_socket_io_handler_posix.cc',
- 'audio/async_socket_io_handler_win.cc',
+ 'audio/android/opensles_wrapper.cc',
'audio/audio_buffers_state.cc',
'audio/audio_buffers_state.h',
'audio/audio_device_name.cc',
@@ -99,26 +104,20 @@
'audio/audio_source_diverter.h',
'audio/audio_util.cc',
'audio/audio_util.h',
+ 'audio/clockless_audio_sink.cc',
+ 'audio/clockless_audio_sink.h',
'audio/cras/audio_manager_cras.cc',
'audio/cras/audio_manager_cras.h',
'audio/cras/cras_input.cc',
'audio/cras/cras_input.h',
'audio/cras/cras_unified.cc',
'audio/cras/cras_unified.h',
- 'audio/cross_process_notification.cc',
- 'audio/cross_process_notification.h',
- 'audio/cross_process_notification_posix.cc',
- 'audio/cross_process_notification_win.cc',
'audio/fake_audio_consumer.cc',
'audio/fake_audio_consumer.h',
'audio/fake_audio_input_stream.cc',
'audio/fake_audio_input_stream.h',
'audio/fake_audio_output_stream.cc',
'audio/fake_audio_output_stream.h',
- 'audio/ios/audio_manager_ios.h',
- 'audio/ios/audio_manager_ios.mm',
- 'audio/ios/audio_session_util_ios.h',
- 'audio/ios/audio_session_util_ios.mm',
'audio/linux/alsa_input.cc',
'audio/linux/alsa_input.h',
'audio/linux/alsa_output.cc',
@@ -153,10 +152,10 @@
'audio/openbsd/audio_manager_openbsd.h',
'audio/pulse/audio_manager_pulse.cc',
'audio/pulse/audio_manager_pulse.h',
- 'audio/pulse/pulse_output.cc',
- 'audio/pulse/pulse_output.h',
'audio/pulse/pulse_input.cc',
'audio/pulse/pulse_input.h',
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
'audio/pulse/pulse_unified.cc',
'audio/pulse/pulse_unified.h',
'audio/pulse/pulse_util.cc',
@@ -183,14 +182,15 @@
'audio/win/audio_unified_win.h',
'audio/win/avrt_wrapper_win.cc',
'audio/win/avrt_wrapper_win.h',
- 'audio/win/device_enumeration_win.cc',
- 'audio/win/device_enumeration_win.h',
'audio/win/core_audio_util_win.cc',
'audio/win/core_audio_util_win.h',
+ 'audio/win/device_enumeration_win.cc',
+ 'audio/win/device_enumeration_win.h',
'audio/win/wavein_input_win.cc',
'audio/win/wavein_input_win.h',
'audio/win/waveout_output_win.cc',
'audio/win/waveout_output_win.h',
+ 'base/android/demuxer_android.h',
'base/android/demuxer_stream_player_params.cc',
'base/android/demuxer_stream_player_params.h',
'base/android/media_player_manager.h',
@@ -217,19 +217,19 @@
'base/audio_pull_fifo.h',
'base/audio_renderer.cc',
'base/audio_renderer.h',
- 'base/audio_renderer_sink.h',
'base/audio_renderer_mixer.cc',
'base/audio_renderer_mixer.h',
'base/audio_renderer_mixer_input.cc',
'base/audio_renderer_mixer_input.h',
+ 'base/audio_renderer_sink.h',
'base/audio_splicer.cc',
'base/audio_splicer.h',
'base/audio_timestamp_helper.cc',
'base/audio_timestamp_helper.h',
'base/bind_to_loop.h',
- 'base/bitstream_buffer.h',
'base/bit_reader.cc',
'base/bit_reader.h',
+ 'base/bitstream_buffer.h',
'base/buffers.h',
'base/byte_queue.cc',
'base/byte_queue.h',
@@ -247,10 +247,10 @@
'base/decoder_buffer.h',
'base/decoder_buffer_queue.cc',
'base/decoder_buffer_queue.h',
- 'base/decryptor.cc',
- 'base/decryptor.h',
'base/decrypt_config.cc',
'base/decrypt_config.h',
+ 'base/decryptor.cc',
+ 'base/decryptor.h',
'base/demuxer.cc',
'base/demuxer.h',
'base/demuxer_stream.cc',
@@ -259,6 +259,8 @@
'base/djb2.h',
'base/filter_collection.cc',
'base/filter_collection.h',
+ 'base/keyboard_event_counter.cc',
+ 'base/keyboard_event_counter.h',
'base/media.cc',
'base/media.h',
'base/media_file_checker.cc',
@@ -302,6 +304,11 @@
'base/stream_parser_buffer.cc',
'base/stream_parser_buffer.h',
'base/text_track.h',
+ 'base/user_input_monitor.cc',
+ 'base/user_input_monitor.h',
+ 'base/user_input_monitor_linux.cc',
+ 'base/user_input_monitor_mac.cc',
+ 'base/user_input_monitor_win.cc',
'base/video_decoder.cc',
'base/video_decoder.h',
'base/video_decoder_config.cc',
@@ -348,10 +355,10 @@
'filters/ffmpeg_video_decoder.h',
'filters/file_data_source.cc',
'filters/file_data_source.h',
+ 'filters/gpu_video_accelerator_factories.cc',
+ 'filters/gpu_video_accelerator_factories.h',
'filters/gpu_video_decoder.cc',
'filters/gpu_video_decoder.h',
- 'filters/gpu_video_decoder_factories.cc',
- 'filters/gpu_video_decoder_factories.h',
'filters/h264_to_annex_b_bitstream_converter.cc',
'filters/h264_to_annex_b_bitstream_converter.h',
'filters/in_memory_url_protocol.cc',
@@ -372,12 +379,16 @@
'filters/video_renderer_base.h',
'filters/vpx_video_decoder.cc',
'filters/vpx_video_decoder.h',
- 'midi/midi_manager.h',
+ 'filters/wsola_internals.cc',
+ 'filters/wsola_internals.h',
'midi/midi_manager.cc',
- 'midi/midi_manager_mac.h',
+ 'midi/midi_manager.h',
'midi/midi_manager_mac.cc',
- 'midi/midi_port_info.h',
+ 'midi/midi_manager_mac.h',
'midi/midi_port_info.cc',
+ 'midi/midi_port_info.h',
+ 'mp3/mp3_stream_parser.cc',
+ 'mp3/mp3_stream_parser.h',
'video/capture/android/video_capture_device_android.cc',
'video/capture/android/video_capture_device_android.h',
'video/capture/fake_video_capture_device.cc',
@@ -388,14 +399,12 @@
'video/capture/mac/video_capture_device_mac.mm',
'video/capture/mac/video_capture_device_qtkit_mac.h',
'video/capture/mac/video_capture_device_qtkit_mac.mm',
-
'video/capture/video_capture.h',
'video/capture/video_capture_device.cc',
'video/capture/video_capture_device.h',
- 'video/capture/video_capture_device_dummy.cc',
- 'video/capture/video_capture_device_dummy.h',
'video/capture/video_capture_proxy.cc',
'video/capture/video_capture_proxy.h',
+ 'video/capture/video_capture_types.cc',
'video/capture/video_capture_types.h',
'video/capture/win/capability_list_win.cc',
'video/capture/win/capability_list_win.h',
@@ -416,6 +425,8 @@
'video/picture.h',
'video/video_decode_accelerator.cc',
'video/video_decode_accelerator.h',
+ 'video/video_encode_accelerator.cc',
+ 'video/video_encode_accelerator.h',
'webm/webm_audio_client.cc',
'webm/webm_audio_client.h',
'webm/webm_cluster_parser.cc',
@@ -452,13 +463,6 @@
'USE_NEON'
],
}],
- ['OS!="ios"', {
- 'dependencies': [
- '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
- '../gpu/gpu.gyp:command_buffer_common',
- 'shared_memory_support',
- ],
- }],
['media_use_ffmpeg==1', {
'dependencies': [
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
@@ -505,55 +509,7 @@
'filters/vpx_video_decoder.h',
],
}],
- ['OS=="ios"', {
- 'includes': [
- # For shared_memory_support_sources variable.
- 'shared_memory_support.gypi',
- ],
- 'sources': [
- 'base/media_stub.cc',
- # These sources are normally built via a dependency on the
- # shared_memory_support target, but that target is not built on iOS.
- # Instead, directly build only the files that are needed for iOS.
- '<@(shared_memory_support_sources)',
- ],
- 'sources/': [
- # Exclude everything but iOS-specific files.
- ['exclude', '\\.(cc|mm)$'],
- ['include', '_ios\\.(cc|mm)$'],
- ['include', '(^|/)ios/'],
- # Re-include specific pieces.
- # iOS support is limited to audio input only.
- ['include', '^audio/audio_buffers_state\\.'],
- ['include', '^audio/audio_input_controller\\.'],
- ['include', '^audio/audio_manager\\.'],
- ['include', '^audio/audio_manager_base\\.'],
- ['include', '^audio/audio_parameters\\.'],
- ['include', '^audio/fake_audio_consumer\\.'],
- ['include', '^audio/fake_audio_input_stream\\.'],
- ['include', '^audio/fake_audio_output_stream\\.'],
- ['include', '^base/audio_bus\\.'],
- ['include', '^base/channel_layout\\.'],
- ['include', '^base/media\\.cc$'],
- ['include', '^base/media_stub\\.cc$'],
- ['include', '^base/media_switches\\.'],
- ['include', '^base/vector_math\\.'],
- ],
- 'link_settings': {
- 'libraries': [
- '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
- '$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
- '$(SDKROOT)/System/Library/Frameworks/CoreMIDI.framework',
- ],
- },
- }],
['OS=="android"', {
- 'link_settings': {
- 'libraries': [
- '-lOpenSLES',
- ],
- },
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/media',
],
@@ -574,6 +530,9 @@
],
}],
],
+ 'defines': [
+ 'DISABLE_USER_INPUT_MONITOR',
+ ],
}],
# A simple WebM encoder for animated avatars on ChromeOS.
['chromeos==1', {
@@ -622,8 +581,16 @@
'-lXdamage',
'-lXext',
'-lXfixes',
+ '-lXtst',
],
},
+ }, { # else: use_x11==0
+ 'sources!': [
+ 'base/user_input_monitor_linux.cc',
+ ],
+ 'defines': [
+ 'DISABLE_USER_INPUT_MONITOR',
+ ],
}],
['use_cras==1', {
'cflags': [
@@ -809,6 +776,25 @@
}],
['proprietary_codecs==1 or branding=="Chrome"', {
'sources': [
+ 'mp2t/es_parser.h',
+ 'mp2t/es_parser_adts.cc',
+ 'mp2t/es_parser_adts.h',
+ 'mp2t/es_parser_h264.cc',
+ 'mp2t/es_parser_h264.h',
+ 'mp2t/mp2t_common.h',
+ 'mp2t/mp2t_stream_parser.cc',
+ 'mp2t/mp2t_stream_parser.h',
+ 'mp2t/ts_packet.cc',
+ 'mp2t/ts_packet.h',
+ 'mp2t/ts_section.h',
+ 'mp2t/ts_section_pat.cc',
+ 'mp2t/ts_section_pat.h',
+ 'mp2t/ts_section_pes.cc',
+ 'mp2t/ts_section_pes.h',
+ 'mp2t/ts_section_pmt.cc',
+ 'mp2t/ts_section_pmt.h',
+ 'mp2t/ts_section_psi.cc',
+ 'mp2t/ts_section_psi.h',
'mp4/aac.cc',
'mp4/aac.h',
'mp4/avc.cc',
@@ -834,8 +820,7 @@
'../build/linux/system.gyp:gtk',
],
}],
- # ios check is necessary due to http://crbug.com/172682.
- ['OS!="ios" and (target_arch=="ia32" or target_arch=="x64")', {
+ ['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [
'media_asm',
'media_mmx',
@@ -851,13 +836,10 @@
'ENABLE_EAC3_PLAYBACK',
],
}],
- ],
- 'target_conditions': [
- ['OS=="ios"', {
- 'sources/': [
- # Pull in specific Mac files for iOS (which have been filtered out
- # by file name rules).
- ['include', '^audio/mac/audio_input_mac\\.'],
+ ['OS!="linux" and OS!="win"', {
+ 'sources!': [
+ 'base/keyboard_event_counter.cc',
+ 'base/keyboard_event_counter.h',
],
}],
],
@@ -868,29 +850,29 @@
'dependencies': [
'media',
'media_test_support',
+ 'shared_memory_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
+ '../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../ui/ui.gyp:ui',
],
'sources': [
- 'audio/async_socket_io_handler_unittest.cc',
+ 'audio/android/audio_android_unittest.cc',
'audio/audio_input_controller_unittest.cc',
- 'audio/audio_input_device_unittest.cc',
'audio/audio_input_unittest.cc',
'audio/audio_input_volume_unittest.cc',
'audio/audio_low_latency_input_output_unittest.cc',
+ 'audio/audio_manager_unittest.cc',
'audio/audio_output_controller_unittest.cc',
'audio/audio_output_device_unittest.cc',
'audio/audio_output_proxy_unittest.cc',
'audio/audio_parameters_unittest.cc',
'audio/audio_power_monitor_unittest.cc',
- 'audio/cross_process_notification_unittest.cc',
'audio/fake_audio_consumer_unittest.cc',
- 'audio/ios/audio_manager_ios_unittest.cc',
'audio/linux/alsa_output_unittest.cc',
'audio/mac/audio_auhal_mac_unittest.cc',
'audio/mac/audio_device_listener_mac_unittest.cc',
@@ -936,10 +918,12 @@
'base/ranges_unittest.cc',
'base/run_all_unittests.cc',
'base/scoped_histogram_timer_unittest.cc',
+ 'base/serial_runner_unittest.cc',
'base/seekable_buffer_unittest.cc',
'base/sinc_resampler_unittest.cc',
'base/test_data_util.cc',
'base/test_data_util.h',
+ 'base/user_input_monitor_unittest.cc',
'base/vector_math_testing.h',
'base/vector_math_unittest.cc',
'base/video_frame_unittest.cc',
@@ -993,12 +977,6 @@
'USE_NEON'
],
}],
- ['OS!="ios"', {
- 'dependencies': [
- '../gpu/gpu.gyp:command_buffer_common',
- 'shared_memory_support',
- ],
- }],
['media_use_ffmpeg==1', {
'dependencies': [
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
@@ -1008,7 +986,17 @@
'base/media_file_checker_unittest.cc',
],
}],
- ['os_posix==1 and OS!="mac" and OS!="ios"', {
+ ['use_alsa==1', {
+ 'defines': [
+ 'USE_ALSA',
+ ],
+ }],
+ ['use_pulseaudio==1', {
+ 'defines': [
+ 'USE_PULSEAUDIO',
+ ],
+ }],
+ ['os_posix==1 and OS!="mac"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
@@ -1017,17 +1005,6 @@
}],
],
}],
- ['OS=="ios"', {
- 'sources/': [
- ['exclude', '.*'],
- ['include', '^audio/audio_input_controller_unittest\\.cc$'],
- ['include', '^audio/audio_input_unittest\\.cc$'],
- ['include', '^audio/audio_parameters_unittest\\.cc$'],
- ['include', '^audio/ios/audio_manager_ios_unittest\\.cc$'],
- ['include', '^base/mock_reader\\.h$'],
- ['include', '^base/run_all_unittests\\.cc$'],
- ],
- }],
['OS=="android"', {
'sources!': [
'audio/audio_input_volume_unittest.cc',
@@ -1043,6 +1020,8 @@
'filters/ffmpeg_video_decoder_unittest.cc',
'filters/pipeline_integration_test.cc',
'filters/pipeline_integration_test_base.cc',
+ 'mp2t/mp2t_stream_parser_unittest.cc',
+ 'mp3/mp3_stream_parser_unittest.cc',
'mp4/mp4_stream_parser_unittest.cc',
'webm/webm_cluster_parser_unittest.cc',
],
@@ -1074,13 +1053,15 @@
'audio/audio_low_latency_input_output_unittest.cc',
],
}],
- ['OS!="ios" and (target_arch=="ia32" or target_arch=="x64")', {
+ ['target_arch=="ia32" or target_arch=="x64"', {
'sources': [
'base/simd/convert_rgb_to_yuv_unittest.cc',
],
}],
- ['proprietary_codecs==1 or branding=="Chrome"', {
+ ['proprietary_codecs==1', {
'sources': [
+ 'mp2t/mp2t_stream_parser_unittest.cc',
+ 'mp3/mp3_stream_parser_unittest.cc',
'mp4/aac_unittest.cc',
'mp4/avc_unittest.cc',
'mp4/box_reader_unittest.cc',
@@ -1125,15 +1106,62 @@
'base/mock_filters.h',
'base/test_helpers.cc',
'base/test_helpers.h',
- 'filters/mock_gpu_video_decoder_factories.cc',
- 'filters/mock_gpu_video_decoder_factories.h',
+ 'filters/mock_gpu_video_accelerator_factories.cc',
+ 'filters/mock_gpu_video_accelerator_factories.h',
'video/mock_video_decode_accelerator.cc',
'video/mock_video_decode_accelerator.h',
],
},
+ {
+ # Minimal target for NaCl and other renderer side media clients which
+ # only need to send audio data across the shared memory to the browser
+ # process.
+ 'target_name': 'shared_memory_support',
+ 'type': '<(component)',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'defines': [
+ 'MEDIA_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'includes': [
+ 'shared_memory_support.gypi',
+ ],
+ 'sources': [
+ '<@(shared_memory_support_sources)',
+ ],
+ 'conditions': [
+ ['arm_neon==1', {
+ 'defines': [
+ 'USE_NEON'
+ ],
+ }],
+ ['target_arch=="ia32" or target_arch=="x64"', {
+ 'dependencies': [
+ 'shared_memory_support_sse'
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'demuxer_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ ],
+ 'sources': [
+ 'tools/demuxer_bench/demuxer_bench.cc',
+ ],
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [ 4267, ],
+ },
],
'conditions': [
- ['OS!="ios" and target_arch!="arm"', {
+ ['target_arch!="arm"', {
'targets': [
{
'target_name': 'media_asm',
@@ -1232,13 +1260,8 @@
'include_dirs': [
'..',
],
- 'conditions': [
- # TODO(jschuh): Get MMX enabled on Win64. crbug.com/179657
- ['OS!="win" or target_arch=="ia32"', {
- 'sources': [
- 'base/simd/filter_yuv_mmx.cc',
- ],
- }],
+ 'sources': [
+ 'base/simd/filter_yuv_mmx.cc',
],
},
{
@@ -1293,125 +1316,6 @@
},
], # targets
}],
- ['OS!="ios"', {
- 'includes': [
- 'media_cdm.gypi',
- ],
- 'targets': [
- {
- # Minimal target for NaCl and other renderer side media clients which
- # only need to send audio data across the shared memory to the browser
- # process.
- 'target_name': 'shared_memory_support',
- 'type': '<(component)',
- 'dependencies': [
- '../base/base.gyp:base',
- ],
- 'defines': [
- 'MEDIA_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'includes': [
- 'shared_memory_support.gypi',
- ],
- 'sources': [
- '<@(shared_memory_support_sources)',
- ],
- 'conditions': [
- ['arm_neon==1', {
- 'defines': [
- 'USE_NEON'
- ],
- }],
- ['target_arch=="ia32" or target_arch=="x64"', {
- 'dependencies': [
- 'shared_memory_support_sse'
- ],
- }],
- ],
- },
- {
- 'target_name': 'seek_tester',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- '../base/base.gyp:base',
- ],
- 'sources': [
- 'tools/seek_tester/seek_tester.cc',
- ],
- },
- {
- 'target_name': 'demuxer_bench',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- '../base/base.gyp:base',
- ],
- 'sources': [
- 'tools/demuxer_bench/demuxer_bench.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
- ],
- }],
- ['(OS=="win" or toolkit_uses_gtk==1) and use_aura!=1', {
- 'targets': [
- {
- 'target_name': 'shader_bench',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- '../base/base.gyp:base',
- '../ui/gl/gl.gyp:gl',
- '../ui/ui.gyp:ui',
- ],
- 'sources': [
- 'tools/shader_bench/cpu_color_painter.cc',
- 'tools/shader_bench/cpu_color_painter.h',
- 'tools/shader_bench/gpu_color_painter.cc',
- 'tools/shader_bench/gpu_color_painter.h',
- 'tools/shader_bench/gpu_painter.cc',
- 'tools/shader_bench/gpu_painter.h',
- 'tools/shader_bench/painter.cc',
- 'tools/shader_bench/painter.h',
- 'tools/shader_bench/shader_bench.cc',
- 'tools/shader_bench/window.cc',
- 'tools/shader_bench/window.h',
- ],
- 'conditions': [
- ['toolkit_uses_gtk==1', {
- 'dependencies': [
- '../build/linux/system.gyp:gtk',
- ],
- 'sources': [
- 'tools/shader_bench/window_linux.cc',
- ],
- }],
- ['OS=="win"', {
- 'dependencies': [
- '../third_party/angle_dx11/src/build_angle.gyp:libEGL',
- '../third_party/angle_dx11/src/build_angle.gyp:libGLESv2',
- ],
- 'sources': [
- 'tools/shader_bench/window_win.cc',
- ],
- }],
- # See http://crbug.com/162998#c4 for why this is needed.
- ['OS=="linux" and linux_use_tcmalloc==1', {
- 'dependencies': [
- '../base/allocator/allocator.gyp:allocator',
- ],
- }],
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
- ],
- }],
['use_x11==1', {
'targets': [
{
@@ -1479,6 +1383,7 @@
'sources': [
'base/android/java/src/org/chromium/media/AudioManagerAndroid.java',
'base/android/java/src/org/chromium/media/MediaCodecBridge.java',
+ 'base/android/java/src/org/chromium/media/MediaDrmBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerListener.java',
'base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java',
@@ -1503,8 +1408,12 @@
'target_name': 'player_android',
'type': 'static_library',
'sources': [
+ 'base/android/audio_decoder_job.cc',
+ 'base/android/audio_decoder_job.h',
'base/android/media_codec_bridge.cc',
'base/android/media_codec_bridge.h',
+ 'base/android/media_decoder_job.cc',
+ 'base/android/media_decoder_job.h',
'base/android/media_drm_bridge.cc',
'base/android/media_drm_bridge.h',
'base/android/media_jni_registrar.cc',
@@ -1517,6 +1426,8 @@
'base/android/media_player_listener.h',
'base/android/media_source_player.cc',
'base/android/media_source_player.h',
+ 'base/android/video_decoder_job.cc',
+ 'base/android/video_decoder_job.h',
'base/android/webaudio_media_codec_bridge.cc',
'base/android/webaudio_media_codec_bridge.h',
'base/android/webaudio_media_codec_info.h',
@@ -1539,6 +1450,7 @@
'type': 'none',
'dependencies': [
'../base/base.gyp:base',
+ 'media_android_imageformat_list',
],
'export_dependent_settings': [
'../base/base.gyp:base',
@@ -1548,7 +1460,18 @@
},
'includes': ['../build/java.gypi'],
},
-
+ {
+ 'target_name': 'media_android_imageformat_list',
+ 'type': 'none',
+ 'sources': [
+ 'base/android/java/src/org/chromium/media/ImageFormat.template',
+ ],
+ 'variables': {
+ 'package_name': 'org/chromium/media',
+ 'template_deps': ['video/capture/android/imageformat_list.h'],
+ },
+ 'includes': [ '../build/android/java_cpp_template.gypi' ],
+ },
],
}],
['media_use_ffmpeg==1', {
@@ -1617,34 +1540,6 @@
}],
],
},
- {
- 'target_name': 'ffmpeg_tests',
- 'type': 'executable',
- 'dependencies': [
- '../base/base.gyp:base',
- '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
- 'media',
- ],
- 'sources': [
- 'test/ffmpeg_tests/ffmpeg_tests.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
- {
- 'target_name': 'media_bench',
- 'type': 'executable',
- 'dependencies': [
- '../base/base.gyp:base',
- '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
- 'media',
- ],
- 'sources': [
- 'tools/media_bench/media_bench.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
],
}],
],
diff --git a/chromium/media/media_cdm.gypi b/chromium/media/media_cdm.gypi
index a4a94f79dbf..2f15fac524c 100644
--- a/chromium/media/media_cdm.gypi
+++ b/chromium/media/media_cdm.gypi
@@ -5,10 +5,10 @@
{
'variables': {
'conditions': [
- ['OS == "android" or OS == "ios"', {
- # Android and iOS don't use ffmpeg.
+ ['OS == "android"', {
+ # Android doesn't use ffmpeg.
'use_ffmpeg%': 0,
- }, { # 'OS != "android" and OS != "ios"'
+ }, { # 'OS != "android"'
'use_ffmpeg%': 1,
}],
],
diff --git a/chromium/media/media_untrusted.gyp b/chromium/media/media_untrusted.gyp
index dade625c33f..638d401c3ba 100644
--- a/chromium/media/media_untrusted.gyp
+++ b/chromium/media/media_untrusted.gyp
@@ -19,7 +19,7 @@
'nacl_untrusted_build': 1,
'nlib_target': 'libshared_memory_support_untrusted.a',
'build_glibc': 0,
- 'build_newlib': 1,
+ 'build_newlib': 0,
'build_irt': 1,
},
'dependencies': [
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index 05fcfa45a96..b3262e4a034 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/message_loop/message_loop.h"
#include "base/threading/thread.h"
namespace media {
@@ -52,7 +53,7 @@ void MIDIManager::AddOutputPort(const MIDIPortInfo& info) {
}
void MIDIManager::ReceiveMIDIData(
- int port_index,
+ uint32 port_index,
const uint8* data,
size_t length,
double timestamp) {
@@ -62,10 +63,13 @@ void MIDIManager::ReceiveMIDIData(
(*i)->ReceiveMIDIData(port_index, data, length, timestamp);
}
+bool MIDIManager::CurrentlyOnMIDISendThread() {
+ return send_thread_->message_loop() == base::MessageLoop::current();
+}
+
void MIDIManager::DispatchSendMIDIData(MIDIManagerClient* client,
- int port_index,
- const uint8* data,
- size_t length,
+ uint32 port_index,
+ const std::vector<uint8>& data,
double timestamp) {
// Lazily create the thread when first needed.
if (!send_thread_) {
@@ -77,7 +81,7 @@ void MIDIManager::DispatchSendMIDIData(MIDIManagerClient* client,
send_message_loop_->PostTask(
FROM_HERE,
base::Bind(&MIDIManager::SendMIDIData, base::Unretained(this),
- client, port_index, data, length, timestamp));
+ client, port_index, data, timestamp));
}
} // namespace media
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index c2b26ab1b13..6a301a942d9 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -6,6 +6,7 @@
#define MEDIA_MIDI_MIDI_MANAGER_H_
#include <set>
+#include <vector>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
@@ -33,7 +34,7 @@ class MEDIA_EXPORT MIDIManagerClient {
// |data| represents a series of bytes encoding one or more MIDI messages.
// |length| is the number of bytes in |data|.
// |timestamp| is the time the data was received, in seconds.
- virtual void ReceiveMIDIData(int port_index,
+ virtual void ReceiveMIDIData(uint32 port_index,
const uint8* data,
size_t length,
double timestamp) = 0;
@@ -70,9 +71,8 @@ class MEDIA_EXPORT MIDIManager {
// |timestamp| is the time to send the data, in seconds. A value of 0
// means send "now" or as soon as possible.
void DispatchSendMIDIData(MIDIManagerClient* client,
- int port_index,
- const uint8* data,
- size_t length,
+ uint32 port_index,
+ const std::vector<uint8>& data,
double timestamp);
// input_ports() is a list of MIDI ports for receiving MIDI data.
@@ -90,21 +90,23 @@ class MEDIA_EXPORT MIDIManager {
virtual bool Initialize() = 0;
// Implements the platform-specific details of sending MIDI data.
+ // This function runs on MIDISendThread.
virtual void SendMIDIData(MIDIManagerClient* client,
- int port_index,
- const uint8* data,
- size_t length,
+ uint32 port_index,
+ const std::vector<uint8>& data,
double timestamp) = 0;
void AddInputPort(const MIDIPortInfo& info);
void AddOutputPort(const MIDIPortInfo& info);
// Dispatches to all clients.
- void ReceiveMIDIData(
- int port_index,
- const uint8* data,
- size_t length,
- double timestamp);
+ void ReceiveMIDIData(uint32 port_index,
+ const uint8* data,
+ size_t length,
+ double timestamp);
+
+ // Checks if current thread is MIDISendThread.
+ bool CurrentlyOnMIDISendThread();
bool initialized_;
diff --git a/chromium/media/midi/midi_manager_mac.cc b/chromium/media/midi/midi_manager_mac.cc
index d766bdb6c5f..4477944e773 100644
--- a/chromium/media/midi/midi_manager_mac.cc
+++ b/chromium/media/midi/midi_manager_mac.cc
@@ -54,7 +54,7 @@ bool MIDIManagerMac::Initialize() {
result = MIDIInputPortCreate(
midi_client_,
CFSTR("MIDI Input"),
- ReadMidiDispatch,
+ ReadMIDIDispatch,
this,
&coremidi_input_);
if (result != noErr)
@@ -67,10 +67,10 @@ bool MIDIManagerMac::Initialize() {
if (result != noErr)
return false;
- int destination_count = MIDIGetNumberOfDestinations();
- destinations_.reserve(destination_count);
+ uint32 destination_count = MIDIGetNumberOfDestinations();
+ destinations_.resize(destination_count);
- for (int i = 0; i < destination_count ; i++) {
+ for (uint32 i = 0; i < destination_count ; i++) {
MIDIEndpointRef destination = MIDIGetDestination(i);
// Keep track of all destinations (known as outputs by the Web MIDI API).
@@ -82,9 +82,9 @@ bool MIDIManagerMac::Initialize() {
}
// Open connections from all sources.
- int source_count = MIDIGetNumberOfSources();
+ uint32 source_count = MIDIGetNumberOfSources();
- for (int i = 0; i < source_count; ++i) {
+ for (uint32 i = 0; i < source_count; ++i) {
// Receive from all sources.
MIDIEndpointRef src = MIDIGetSource(i);
MIDIPortConnectSource(coremidi_input_, src, reinterpret_cast<void*>(src));
@@ -110,7 +110,7 @@ MIDIManagerMac::~MIDIManagerMac() {
MIDIPortDispose(coremidi_output_);
}
-void MIDIManagerMac::ReadMidiDispatch(const MIDIPacketList* packet_list,
+void MIDIManagerMac::ReadMIDIDispatch(const MIDIPacketList* packet_list,
void* read_proc_refcon,
void* src_conn_refcon) {
MIDIManagerMac* manager = static_cast<MIDIManagerMac*>(read_proc_refcon);
@@ -121,16 +121,16 @@ void MIDIManagerMac::ReadMidiDispatch(const MIDIPacketList* packet_list,
#endif
// Dispatch to class method.
- manager->ReadMidi(source, packet_list);
+ manager->ReadMIDI(source, packet_list);
}
-void MIDIManagerMac::ReadMidi(MIDIEndpointRef source,
+void MIDIManagerMac::ReadMIDI(MIDIEndpointRef source,
const MIDIPacketList* packet_list) {
// Lookup the port index based on the source.
SourceMap::iterator j = source_map_.find(source);
if (j == source_map_.end())
return;
- int port_index = source_map_[source];
+ uint32 port_index = source_map_[source];
// Go through each packet and process separately.
for(size_t i = 0; i < packet_list->numPackets; i++) {
@@ -147,10 +147,11 @@ void MIDIManagerMac::ReadMidi(MIDIEndpointRef source,
}
void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
- int port_index,
- const uint8* data,
- size_t length,
+ uint32 port_index,
+ const std::vector<uint8>& data,
double timestamp) {
+ DCHECK(CurrentlyOnMIDISendThread());
+
// System Exclusive has already been filtered.
MIDITimeStamp coremidi_timestamp = SecondsToMIDITimeStamp(timestamp);
@@ -159,14 +160,11 @@ void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
kMaxPacketListSize,
midi_packet_,
coremidi_timestamp,
- length,
- data);
+ data.size(),
+ &data[0]);
// Lookup the destination based on the port index.
- // TODO(crogers): re-factor |port_index| to use unsigned
- // to avoid the need for this check.
- if (port_index < 0 ||
- static_cast<size_t>(port_index) >= destinations_.size())
+ if (static_cast<size_t>(port_index) >= destinations_.size())
return;
MIDIEndpointRef destination = destinations_[port_index];
@@ -176,7 +174,7 @@ void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
// Re-initialize for next time.
midi_packet_ = MIDIPacketListInit(packet_list_);
- client->AccumulateMIDIBytesSent(length);
+ client->AccumulateMIDIBytesSent(data.size());
}
MIDIPortInfo MIDIManagerMac::GetPortInfoFromEndpoint(
diff --git a/chromium/media/midi/midi_manager_mac.h b/chromium/media/midi/midi_manager_mac.h
index ed7b524f5c4..2397b8034f7 100644
--- a/chromium/media/midi/midi_manager_mac.h
+++ b/chromium/media/midi/midi_manager_mac.h
@@ -8,6 +8,7 @@
#include <CoreMIDI/MIDIServices.h>
#include <map>
#include <string>
+#include <vector>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
@@ -24,20 +25,19 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// MIDIManager implementation.
virtual bool Initialize() OVERRIDE;
virtual void SendMIDIData(MIDIManagerClient* client,
- int port_index,
- const uint8* data,
- size_t length,
+ uint32 port_index,
+ const std::vector<uint8>& data,
double timestamp) OVERRIDE;
private:
// CoreMIDI callback for MIDI data.
// Each callback can contain multiple packets, each of which can contain
// multiple MIDI messages.
- static void ReadMidiDispatch(
+ static void ReadMIDIDispatch(
const MIDIPacketList *pktlist,
void *read_proc_refcon,
void *src_conn_refcon);
- virtual void ReadMidi(MIDIEndpointRef source, const MIDIPacketList *pktlist);
+ virtual void ReadMIDI(MIDIEndpointRef source, const MIDIPacketList *pktlist);
// Helper
static media::MIDIPortInfo GetPortInfoFromEndpoint(MIDIEndpointRef endpoint);
@@ -54,7 +54,7 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
MIDIPacketList* packet_list_;
MIDIPacket* midi_packet_;
- typedef std::map<MIDIEndpointRef, int> SourceMap;
+ typedef std::map<MIDIEndpointRef, uint32> SourceMap;
// Keeps track of the index (0-based) for each of our sources.
SourceMap source_map_;
diff --git a/chromium/media/mp2t/es_parser.h b/chromium/media/mp2t/es_parser.h
new file mode 100644
index 00000000000..da06c5ef673
--- /dev/null
+++ b/chromium/media/mp2t/es_parser.h
@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_ES_PARSER_H_
+#define MEDIA_MP2T_ES_PARSER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+
+namespace media {
+
+class StreamParserBuffer;
+
+namespace mp2t {
+
+class EsParser {
+ public:
+ typedef base::Callback<void(scoped_refptr<StreamParserBuffer>)> EmitBufferCB;
+
+ EsParser() {}
+ virtual ~EsParser() {}
+
+ // ES parsing.
+ // Should use kNoTimestamp when a timestamp is not valid.
+ virtual bool Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) = 0;
+
+ // Flush any pending buffer.
+ virtual void Flush() = 0;
+
+ // Reset the state of the ES parser.
+ virtual void Reset() = 0;
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
diff --git a/chromium/media/mp2t/es_parser_adts.cc b/chromium/media/mp2t/es_parser_adts.cc
new file mode 100644
index 00000000000..b7578360b69
--- /dev/null
+++ b/chromium/media/mp2t/es_parser_adts.cc
@@ -0,0 +1,295 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/es_parser_adts.h"
+
+#include <list>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/bit_reader.h"
+#include "media/base/buffers.h"
+#include "media/base/channel_layout.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/mp2t/mp2t_common.h"
+
+// Adts header is at least 7 bytes (can be 9 bytes).
+static const int kAdtsHeaderMinSize = 7;
+
+static const int adts_frequency_table[16] = {
+ 96000,
+ 88200,
+ 64000,
+ 48000,
+ 44100,
+ 32000,
+ 24000,
+ 22050,
+ 16000,
+ 12000,
+ 11025,
+ 8000,
+ 7350,
+ 0,
+ 0,
+ 0,
+};
+static const int kMaxSupportedFrequencyIndex = 12;
+
+static media::ChannelLayout adts_channel_layout[8] = {
+ media::CHANNEL_LAYOUT_NONE,
+ media::CHANNEL_LAYOUT_MONO,
+ media::CHANNEL_LAYOUT_STEREO,
+ media::CHANNEL_LAYOUT_SURROUND,
+ media::CHANNEL_LAYOUT_4_0,
+ media::CHANNEL_LAYOUT_5_0_BACK,
+ media::CHANNEL_LAYOUT_5_1_BACK,
+ media::CHANNEL_LAYOUT_7_1,
+};
+
+// Number of samples per frame.
+static const int kNumberSamplesPerAACFrame = 1024;
+
+static int ExtractAdtsFrameSize(const uint8* adts_header) {
+ return ((static_cast<int>(adts_header[5]) >> 5) |
+ (static_cast<int>(adts_header[4]) << 3) |
+ ((static_cast<int>(adts_header[3]) & 0x3) << 11));
+}
+
+static int ExtractAdtsFrequencyIndex(const uint8* adts_header) {
+ return ((adts_header[2] >> 2) & 0xf);
+}
+
+static int ExtractAdtsChannelConfig(const uint8* adts_header) {
+ return (((adts_header[3] >> 6) & 0x3) |
+ ((adts_header[2] & 0x1) << 2));
+}
+
+// Return true if buf corresponds to an ADTS syncword.
+// |buf| size must be at least 2.
+static bool isAdtsSyncWord(const uint8* buf) {
+ return (buf[0] == 0xff) && ((buf[1] & 0xf6) == 0xf0);
+}
+
+// Look for an ADTS syncword.
+// |new_pos| returns
+// - either the byte position of the ADTS frame (if found)
+// - or the byte position of 1st byte that was not processed (if not found).
+// In every case, the returned value in |new_pos| is such that new_pos >= pos
+// |frame_sz| returns the size of the ADTS frame (if found).
+// Return whether a syncword was found.
+static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
+ int pos,
+ int* new_pos, int* frame_sz) {
+ DCHECK_GE(pos, 0);
+ DCHECK_LE(pos, raw_es_size);
+
+ int max_offset = raw_es_size - kAdtsHeaderMinSize;
+ if (pos >= max_offset) {
+ // Do not change the position if:
+ // - max_offset < 0: not enough bytes to get a full header
+ // Since pos >= 0, this is a subcase of the next condition.
+ // - pos >= max_offset: might be the case after reading one full frame,
+ // |pos| is then incremented by the frame size and might then point
+ // to the end of the buffer.
+ *new_pos = pos;
+ return false;
+ }
+
+ for (int offset = pos; offset < max_offset; offset++) {
+ const uint8* cur_buf = &raw_es[offset];
+
+ if (!isAdtsSyncWord(cur_buf))
+ // The first 12 bits must be 1.
+ // The layer field (2 bits) must be set to 0.
+ continue;
+
+ int frame_size = ExtractAdtsFrameSize(cur_buf);
+ if (frame_size < kAdtsHeaderMinSize) {
+ // Too short to be an ADTS frame.
+ continue;
+ }
+
+ // Check whether there is another frame
+ // |size| apart from the current one.
+ int remaining_size = raw_es_size - offset;
+ if (remaining_size >= frame_size + 2 &&
+ !isAdtsSyncWord(&cur_buf[frame_size])) {
+ continue;
+ }
+
+ *new_pos = offset;
+ *frame_sz = frame_size;
+ return true;
+ }
+
+ *new_pos = max_offset;
+ return false;
+}
+
+namespace media {
+namespace mp2t {
+
+EsParserAdts::EsParserAdts(
+ const NewAudioConfigCB& new_audio_config_cb,
+ const EmitBufferCB& emit_buffer_cb)
+ : new_audio_config_cb_(new_audio_config_cb),
+ emit_buffer_cb_(emit_buffer_cb) {
+}
+
+EsParserAdts::~EsParserAdts() {
+}
+
+bool EsParserAdts::Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) {
+ int raw_es_size;
+ const uint8* raw_es;
+
+ // The incoming PTS applies to the access unit that comes just after
+ // the beginning of |buf|.
+ if (pts != kNoTimestamp()) {
+ es_byte_queue_.Peek(&raw_es, &raw_es_size);
+ pts_list_.push_back(EsPts(raw_es_size, pts));
+ }
+
+ // Copy the input data to the ES buffer.
+ es_byte_queue_.Push(buf, size);
+ es_byte_queue_.Peek(&raw_es, &raw_es_size);
+
+ // Look for every ADTS frame in the ES buffer starting at offset = 0
+ int es_position = 0;
+ int frame_size;
+ while (LookForSyncWord(raw_es, raw_es_size, es_position,
+ &es_position, &frame_size)) {
+ DVLOG(LOG_LEVEL_ES)
+ << "ADTS syncword @ pos=" << es_position
+ << " frame_size=" << frame_size;
+ DVLOG(LOG_LEVEL_ES)
+ << "ADTS header: "
+ << base::HexEncode(&raw_es[es_position], kAdtsHeaderMinSize);
+
+ // Do not process the frame if this one is a partial frame.
+ int remaining_size = raw_es_size - es_position;
+ if (frame_size > remaining_size)
+ break;
+
+ // Update the audio configuration if needed.
+ DCHECK_GE(frame_size, kAdtsHeaderMinSize);
+ if (!UpdateAudioConfiguration(&raw_es[es_position]))
+ return false;
+
+ // Get the PTS & the duration of this access unit.
+ while (!pts_list_.empty() &&
+ pts_list_.front().first <= es_position) {
+ audio_timestamp_helper_->SetBaseTimestamp(pts_list_.front().second);
+ pts_list_.pop_front();
+ }
+
+ base::TimeDelta current_pts = audio_timestamp_helper_->GetTimestamp();
+ base::TimeDelta frame_duration =
+ audio_timestamp_helper_->GetFrameDuration(kNumberSamplesPerAACFrame);
+
+ // Emit an audio frame.
+ bool is_key_frame = true;
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer =
+ StreamParserBuffer::CopyFrom(
+ &raw_es[es_position],
+ frame_size,
+ is_key_frame);
+ stream_parser_buffer->SetDecodeTimestamp(current_pts);
+ stream_parser_buffer->set_timestamp(current_pts);
+ stream_parser_buffer->set_duration(frame_duration);
+ emit_buffer_cb_.Run(stream_parser_buffer);
+
+ // Update the PTS of the next frame.
+ audio_timestamp_helper_->AddFrames(kNumberSamplesPerAACFrame);
+
+ // Skip the current frame.
+ es_position += frame_size;
+ }
+
+ // Discard all the bytes that have been processed.
+ DiscardEs(es_position);
+
+ return true;
+}
+
+void EsParserAdts::Flush() {
+}
+
+void EsParserAdts::Reset() {
+ es_byte_queue_.Reset();
+ pts_list_.clear();
+ last_audio_decoder_config_ = AudioDecoderConfig();
+}
+
+bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
+ int frequency_index = ExtractAdtsFrequencyIndex(adts_header);
+ if (frequency_index > kMaxSupportedFrequencyIndex) {
+ // Frequency index 13 & 14 are reserved
+ // while 15 means that the frequency is explicitly written
+ // (not supported).
+ return false;
+ }
+
+ int channel_configuration = ExtractAdtsChannelConfig(adts_header);
+ if (channel_configuration == 0) {
+ // TODO(damienv): Add support for inband channel configuration.
+ return false;
+ }
+
+ // TODO(damienv): support HE-AAC frequency doubling (SBR)
+ // based on the incoming ADTS profile.
+ int samples_per_second = adts_frequency_table[frequency_index];
+ int adts_profile = (adts_header[2] >> 6) & 0x3;
+
+ AudioDecoderConfig audio_decoder_config(
+ kCodecAAC,
+ kSampleFormatS16,
+ adts_channel_layout[channel_configuration],
+ samples_per_second,
+ NULL, 0,
+ false);
+
+ if (!audio_decoder_config.Matches(last_audio_decoder_config_)) {
+ DVLOG(1) << "Sampling frequency: " << samples_per_second;
+ DVLOG(1) << "Channel config: " << channel_configuration;
+ DVLOG(1) << "Adts profile: " << adts_profile;
+ // Reset the timestamp helper to use a new time scale.
+ if (audio_timestamp_helper_) {
+ base::TimeDelta base_timestamp = audio_timestamp_helper_->GetTimestamp();
+ audio_timestamp_helper_.reset(
+ new AudioTimestampHelper(samples_per_second));
+ audio_timestamp_helper_->SetBaseTimestamp(base_timestamp);
+ } else {
+ audio_timestamp_helper_.reset(
+ new AudioTimestampHelper(samples_per_second));
+ }
+ // Audio config notification.
+ last_audio_decoder_config_ = audio_decoder_config;
+ new_audio_config_cb_.Run(audio_decoder_config);
+ }
+
+ return true;
+}
+
+void EsParserAdts::DiscardEs(int nbytes) {
+ DCHECK_GE(nbytes, 0);
+ if (nbytes <= 0)
+ return;
+
+ // Adjust the ES position of each PTS.
+ for (EsPtsList::iterator it = pts_list_.begin(); it != pts_list_.end(); ++it)
+ it->first -= nbytes;
+
+ // Discard |nbytes| of ES.
+ es_byte_queue_.Pop(nbytes);
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/es_parser_adts.h b/chromium/media/mp2t/es_parser_adts.h
new file mode 100644
index 00000000000..fd0fe587c07
--- /dev/null
+++ b/chromium/media/mp2t/es_parser_adts.h
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_ES_PARSER_ADTS_H_
+#define MEDIA_MP2T_ES_PARSER_ADTS_H_
+
+#include <list>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/byte_queue.h"
+#include "media/mp2t/es_parser.h"
+
+namespace media {
+class AudioTimestampHelper;
+class BitReader;
+class StreamParserBuffer;
+}
+
+namespace media {
+namespace mp2t {
+
+class EsParserAdts : public EsParser {
+ public:
+ typedef base::Callback<void(const AudioDecoderConfig&)> NewAudioConfigCB;
+
+ EsParserAdts(const NewAudioConfigCB& new_audio_config_cb,
+ const EmitBufferCB& emit_buffer_cb);
+ virtual ~EsParserAdts();
+
+ // EsParser implementation.
+ virtual bool Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ private:
+ // Used to link a PTS with a byte position in the ES stream.
+ typedef std::pair<int, base::TimeDelta> EsPts;
+ typedef std::list<EsPts> EsPtsList;
+
+ // Signal any audio configuration change (if any).
+ // Return false if the current audio config is not
+ // a supported ADTS audio config.
+ bool UpdateAudioConfiguration(const uint8* adts_header);
+
+ // Discard some bytes from the ES stream.
+ void DiscardEs(int nbytes);
+
+ // Callbacks:
+ // - to signal a new audio configuration,
+ // - to send ES buffers.
+ NewAudioConfigCB new_audio_config_cb_;
+ EmitBufferCB emit_buffer_cb_;
+
+ // Bytes of the ES stream that have not been emitted yet.
+ ByteQueue es_byte_queue_;
+
+ // List of PTS associated with a position in the ES stream.
+ EsPtsList pts_list_;
+
+ // Interpolated PTS for frames that don't have one.
+ scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
+
+ // Last audio config.
+ AudioDecoderConfig last_audio_decoder_config_;
+
+ DISALLOW_COPY_AND_ASSIGN(EsParserAdts);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp2t/es_parser_h264.cc b/chromium/media/mp2t/es_parser_h264.cc
new file mode 100644
index 00000000000..99c28893b4a
--- /dev/null
+++ b/chromium/media/mp2t/es_parser_h264.cc
@@ -0,0 +1,507 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/es_parser_h264.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/mp2t/mp2t_common.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+static const int kExtendedSar = 255;
+
+// ISO 14496 part 10
+// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
+static const int kTableSarWidth[14] = {
+ 1, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160
+};
+
+static const int kTableSarHeight[14] = {
+ 1, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99
+};
+
+// Remove the start code emulation prevention ( 0x000003 )
+// and return the size of the converted buffer.
+// Note: Size of |buf_rbsp| should be at least |size| to accomodate
+// the worst case.
+static int ConvertToRbsp(const uint8* buf, int size, uint8* buf_rbsp) {
+ int rbsp_size = 0;
+ int zero_count = 0;
+ for (int k = 0; k < size; k++) {
+ if (buf[k] == 0x3 && zero_count >= 2) {
+ zero_count = 0;
+ continue;
+ }
+ if (buf[k] == 0)
+ zero_count++;
+ else
+ zero_count = 0;
+ buf_rbsp[rbsp_size++] = buf[k];
+ }
+ return rbsp_size;
+}
+
+namespace media {
+namespace mp2t {
+
+// ISO 14496 - Part 10: Table 7-1 "NAL unit type codes"
+enum NalUnitType {
+ kNalUnitTypeNonIdrSlice = 1,
+ kNalUnitTypeIdrSlice = 5,
+ kNalUnitTypeSPS = 7,
+ kNalUnitTypePPS = 8,
+ kNalUnitTypeAUD = 9,
+};
+
+class BitReaderH264 : public BitReader {
+ public:
+ BitReaderH264(const uint8* data, off_t size)
+ : BitReader(data, size) { }
+
+ // Read an unsigned exp-golomb value.
+ // Return true if successful.
+ bool ReadBitsExpGolomb(uint32* exp_golomb_value);
+};
+
+bool BitReaderH264::ReadBitsExpGolomb(uint32* exp_golomb_value) {
+ // Get the number of leading zeros.
+ int zero_count = 0;
+ while (true) {
+ int one_bit;
+ RCHECK(ReadBits(1, &one_bit));
+ if (one_bit != 0)
+ break;
+ zero_count++;
+ }
+
+ // If zero_count is greater than 31, the calculated value will overflow.
+ if (zero_count > 31) {
+ SkipBits(zero_count);
+ return false;
+ }
+
+ // Read the actual value.
+ uint32 base = (1 << zero_count) - 1;
+ uint32 offset;
+ RCHECK(ReadBits(zero_count, &offset));
+ *exp_golomb_value = base + offset;
+
+ return true;
+}
+
+EsParserH264::EsParserH264(
+ const NewVideoConfigCB& new_video_config_cb,
+ const EmitBufferCB& emit_buffer_cb)
+ : new_video_config_cb_(new_video_config_cb),
+ emit_buffer_cb_(emit_buffer_cb),
+ es_pos_(0),
+ current_nal_pos_(-1),
+ current_access_unit_pos_(-1),
+ is_key_frame_(false) {
+}
+
+EsParserH264::~EsParserH264() {
+}
+
+bool EsParserH264::Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) {
+ // Note: Parse is invoked each time a PES packet has been reassembled.
+ // Unfortunately, a PES packet does not necessarily map
+ // to an h264 access unit, although the HLS recommendation is to use one PES
+ // for each access unit (but this is just a recommendation and some streams
+ // do not comply with this recommendation).
+
+ // Link position |raw_es_size| in the ES stream with a timing descriptor.
+ // HLS recommendation: "In AVC video, you should have both a DTS and a
+ // PTS in each PES header".
+ if (dts == kNoTimestamp() && pts == kNoTimestamp()) {
+ DVLOG(1) << "A timestamp must be provided for each reassembled PES";
+ return false;
+ }
+ TimingDesc timing_desc;
+ timing_desc.pts = pts;
+ timing_desc.dts = (dts != kNoTimestamp()) ? dts : pts;
+
+ int raw_es_size;
+ const uint8* raw_es;
+ es_byte_queue_.Peek(&raw_es, &raw_es_size);
+ timing_desc_list_.push_back(
+ std::pair<int, TimingDesc>(raw_es_size, timing_desc));
+
+ // Add the incoming bytes to the ES queue.
+ es_byte_queue_.Push(buf, size);
+
+ // Add NALs from the incoming buffer.
+ if (!ParseInternal())
+ return false;
+
+ // Discard emitted frames
+ // or every byte that was parsed so far if there is no current frame.
+ int skip_count =
+ (current_access_unit_pos_ >= 0) ? current_access_unit_pos_ : es_pos_;
+ DiscardEs(skip_count);
+
+ return true;
+}
+
+void EsParserH264::Flush() {
+ if (current_access_unit_pos_ < 0)
+ return;
+
+ // Force emitting the last access unit.
+ int next_aud_pos;
+ const uint8* raw_es;
+ es_byte_queue_.Peek(&raw_es, &next_aud_pos);
+ EmitFrameIfNeeded(next_aud_pos);
+ current_nal_pos_ = -1;
+ StartFrame(-1);
+
+ // Discard the emitted frame.
+ DiscardEs(next_aud_pos);
+}
+
+void EsParserH264::Reset() {
+ DVLOG(1) << "EsParserH264::Reset";
+ es_byte_queue_.Reset();
+ timing_desc_list_.clear();
+ es_pos_ = 0;
+ current_nal_pos_ = -1;
+ StartFrame(-1);
+ last_video_decoder_config_ = VideoDecoderConfig();
+}
+
+bool EsParserH264::ParseInternal() {
+ int raw_es_size;
+ const uint8* raw_es;
+ es_byte_queue_.Peek(&raw_es, &raw_es_size);
+
+ DCHECK_GE(es_pos_, 0);
+ DCHECK_LT(es_pos_, raw_es_size);
+
+ // Resume h264 es parsing where it was left.
+ for ( ; es_pos_ < raw_es_size - 4; es_pos_++) {
+ // Make sure the syncword is either 00 00 00 01 or 00 00 01
+ if (raw_es[es_pos_ + 0] != 0 || raw_es[es_pos_ + 1] != 0)
+ continue;
+ int syncword_length = 0;
+ if (raw_es[es_pos_ + 2] == 0 && raw_es[es_pos_ + 3] == 1)
+ syncword_length = 4;
+ else if (raw_es[es_pos_ + 2] == 1)
+ syncword_length = 3;
+ else
+ continue;
+
+ // Parse the current NAL (and the new NAL then becomes the current one).
+ if (current_nal_pos_ >= 0) {
+ int nal_size = es_pos_ - current_nal_pos_;
+ DCHECK_GT(nal_size, 0);
+ RCHECK(NalParser(&raw_es[current_nal_pos_], nal_size));
+ }
+ current_nal_pos_ = es_pos_ + syncword_length;
+
+ // Retrieve the NAL type.
+ int nal_header = raw_es[current_nal_pos_];
+ int forbidden_zero_bit = (nal_header >> 7) & 0x1;
+ RCHECK(forbidden_zero_bit == 0);
+ NalUnitType nal_unit_type = static_cast<NalUnitType>(nal_header & 0x1f);
+ DVLOG(LOG_LEVEL_ES) << "nal: offset=" << es_pos_
+ << " type=" << nal_unit_type;
+
+ // Emit a frame if needed.
+ if (nal_unit_type == kNalUnitTypeAUD)
+ EmitFrameIfNeeded(es_pos_);
+
+ // Skip the syncword.
+ es_pos_ += syncword_length;
+ }
+
+ return true;
+}
+
+void EsParserH264::EmitFrameIfNeeded(int next_aud_pos) {
+ // There is no current frame: start a new frame.
+ if (current_access_unit_pos_ < 0) {
+ StartFrame(next_aud_pos);
+ return;
+ }
+
+ // Get the access unit timing info.
+ TimingDesc current_timing_desc;
+ while (!timing_desc_list_.empty() &&
+ timing_desc_list_.front().first <= current_access_unit_pos_) {
+ current_timing_desc = timing_desc_list_.front().second;
+ timing_desc_list_.pop_front();
+ }
+
+ // Emit a frame.
+ int raw_es_size;
+ const uint8* raw_es;
+ es_byte_queue_.Peek(&raw_es, &raw_es_size);
+ int access_unit_size = next_aud_pos - current_access_unit_pos_;
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer =
+ StreamParserBuffer::CopyFrom(
+ &raw_es[current_access_unit_pos_],
+ access_unit_size,
+ is_key_frame_);
+ stream_parser_buffer->SetDecodeTimestamp(current_timing_desc.dts);
+ stream_parser_buffer->set_timestamp(current_timing_desc.pts);
+ emit_buffer_cb_.Run(stream_parser_buffer);
+
+ // Set the current frame position to the next AUD position.
+ StartFrame(next_aud_pos);
+}
+
+void EsParserH264::StartFrame(int aud_pos) {
+ // Two cases:
+ // - if aud_pos < 0, clear the current frame and set |is_key_frame| to a
+ // default value (false).
+ // - if aud_pos >= 0, start a new frame and set |is_key_frame| to true
+ // |is_key_frame_| will be updated while parsing the NALs of that frame.
+ // If any NAL is a non IDR NAL, it will be set to false.
+ current_access_unit_pos_ = aud_pos;
+ is_key_frame_ = (aud_pos >= 0);
+}
+
+void EsParserH264::DiscardEs(int nbytes) {
+ DCHECK_GE(nbytes, 0);
+ if (nbytes == 0)
+ return;
+
+ // Update the position of
+ // - the parser,
+ // - the current NAL,
+ // - the current access unit.
+ es_pos_ -= nbytes;
+ if (es_pos_ < 0)
+ es_pos_ = 0;
+
+ if (current_nal_pos_ >= 0) {
+ DCHECK_GE(current_nal_pos_, nbytes);
+ current_nal_pos_ -= nbytes;
+ }
+ if (current_access_unit_pos_ >= 0) {
+ DCHECK_GE(current_access_unit_pos_, nbytes);
+ current_access_unit_pos_ -= nbytes;
+ }
+
+ // Update the timing information accordingly.
+ std::list<std::pair<int, TimingDesc> >::iterator timing_it
+ = timing_desc_list_.begin();
+ for (; timing_it != timing_desc_list_.end(); ++timing_it)
+ timing_it->first -= nbytes;
+
+ // Discard |nbytes| of ES.
+ es_byte_queue_.Pop(nbytes);
+}
+
+bool EsParserH264::NalParser(const uint8* buf, int size) {
+ // Get the NAL header.
+ if (size < 1) {
+ DVLOG(1) << "NalParser: incomplete NAL";
+ return false;
+ }
+ int nal_header = buf[0];
+ buf += 1;
+ size -= 1;
+
+ int forbidden_zero_bit = (nal_header >> 7) & 0x1;
+ if (forbidden_zero_bit != 0)
+ return false;
+ int nal_ref_idc = (nal_header >> 5) & 0x3;
+ int nal_unit_type = nal_header & 0x1f;
+
+ // Process the NAL content.
+ switch (nal_unit_type) {
+ case kNalUnitTypeSPS:
+ DVLOG(LOG_LEVEL_ES) << "NAL: SPS";
+ // |nal_ref_idc| should not be 0 for a SPS.
+ if (nal_ref_idc == 0)
+ return false;
+ return ProcessSPS(buf, size);
+ case kNalUnitTypeIdrSlice:
+ DVLOG(LOG_LEVEL_ES) << "NAL: IDR slice";
+ return true;
+ case kNalUnitTypeNonIdrSlice:
+ DVLOG(LOG_LEVEL_ES) << "NAL: Non IDR slice";
+ is_key_frame_ = false;
+ return true;
+ case kNalUnitTypePPS:
+ DVLOG(LOG_LEVEL_ES) << "NAL: PPS";
+ return true;
+ case kNalUnitTypeAUD:
+ DVLOG(LOG_LEVEL_ES) << "NAL: AUD";
+ return true;
+ default:
+ DVLOG(LOG_LEVEL_ES) << "NAL: " << nal_unit_type;
+ return true;
+ }
+
+ NOTREACHED();
+ return false;
+}
+
+bool EsParserH264::ProcessSPS(const uint8* buf, int size) {
+ if (size <= 0)
+ return false;
+
+ // Removes start code emulation prevention.
+ // TODO(damienv): refactoring in media/base
+ // so as to have a unique H264 bit reader in Chrome.
+ scoped_ptr<uint8[]> buf_rbsp(new uint8[size]);
+ int rbsp_size = ConvertToRbsp(buf, size, buf_rbsp.get());
+
+ BitReaderH264 bit_reader(buf_rbsp.get(), rbsp_size);
+
+ int profile_idc;
+ int constraint_setX_flag;
+ int level_idc;
+ uint32 seq_parameter_set_id;
+ uint32 log2_max_frame_num_minus4;
+ uint32 pic_order_cnt_type;
+ RCHECK(bit_reader.ReadBits(8, &profile_idc));
+ RCHECK(bit_reader.ReadBits(8, &constraint_setX_flag));
+ RCHECK(bit_reader.ReadBits(8, &level_idc));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&seq_parameter_set_id));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&log2_max_frame_num_minus4));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&pic_order_cnt_type));
+
+ // |pic_order_cnt_type| shall be in the range of 0 to 2.
+ RCHECK(pic_order_cnt_type <= 2);
+ if (pic_order_cnt_type == 0) {
+ uint32 log2_max_pic_order_cnt_lsb_minus4;
+ RCHECK(bit_reader.ReadBitsExpGolomb(&log2_max_pic_order_cnt_lsb_minus4));
+ } else if (pic_order_cnt_type == 1) {
+ // Note: |offset_for_non_ref_pic| and |offset_for_top_to_bottom_field|
+ // corresponds to their codenum not to their actual value.
+ int delta_pic_order_always_zero_flag;
+ uint32 offset_for_non_ref_pic;
+ uint32 offset_for_top_to_bottom_field;
+ uint32 num_ref_frames_in_pic_order_cnt_cycle;
+ RCHECK(bit_reader.ReadBits(1, &delta_pic_order_always_zero_flag));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_non_ref_pic));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_top_to_bottom_field));
+ RCHECK(
+ bit_reader.ReadBitsExpGolomb(&num_ref_frames_in_pic_order_cnt_cycle));
+ for (uint32 i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ uint32 offset_for_ref_frame_codenum;
+ RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_ref_frame_codenum));
+ }
+ }
+
+ uint32 num_ref_frames;
+ int gaps_in_frame_num_value_allowed_flag;
+ uint32 pic_width_in_mbs_minus1;
+ uint32 pic_height_in_map_units_minus1;
+ RCHECK(bit_reader.ReadBitsExpGolomb(&num_ref_frames));
+ RCHECK(bit_reader.ReadBits(1, &gaps_in_frame_num_value_allowed_flag));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&pic_width_in_mbs_minus1));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&pic_height_in_map_units_minus1));
+
+ int frame_mbs_only_flag;
+ RCHECK(bit_reader.ReadBits(1, &frame_mbs_only_flag));
+ if (!frame_mbs_only_flag) {
+ int mb_adaptive_frame_field_flag;
+ RCHECK(bit_reader.ReadBits(1, &mb_adaptive_frame_field_flag));
+ }
+
+ int direct_8x8_inference_flag;
+ RCHECK(bit_reader.ReadBits(1, &direct_8x8_inference_flag));
+
+ int frame_cropping_flag;
+ uint32 frame_crop_left_offset = 0;
+ uint32 frame_crop_right_offset = 0;
+ uint32 frame_crop_top_offset = 0;
+ uint32 frame_crop_bottom_offset = 0;
+ RCHECK(bit_reader.ReadBits(1, &frame_cropping_flag));
+ if (frame_cropping_flag) {
+ RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_left_offset));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_right_offset));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_top_offset));
+ RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_bottom_offset));
+ }
+
+ int vui_parameters_present_flag;
+ RCHECK(bit_reader.ReadBits(1, &vui_parameters_present_flag));
+ int sar_width = 1;
+ int sar_height = 1;
+ if (vui_parameters_present_flag) {
+ // Read only the aspect ratio information from the VUI section.
+ // TODO(damienv): check whether other VUI info are useful.
+ int aspect_ratio_info_present_flag;
+ RCHECK(bit_reader.ReadBits(1, &aspect_ratio_info_present_flag));
+ if (aspect_ratio_info_present_flag) {
+ int aspect_ratio_idc;
+ RCHECK(bit_reader.ReadBits(8, &aspect_ratio_idc));
+ if (aspect_ratio_idc == kExtendedSar) {
+ RCHECK(bit_reader.ReadBits(16, &sar_width));
+ RCHECK(bit_reader.ReadBits(16, &sar_height));
+ } else if (aspect_ratio_idc < 14) {
+ sar_width = kTableSarWidth[aspect_ratio_idc];
+ sar_height = kTableSarHeight[aspect_ratio_idc];
+ }
+ }
+ }
+
+ if (sar_width != sar_height) {
+ // TODO(damienv): Support non square pixels.
+ DVLOG(1)
+ << "Non square pixel not supported yet:"
+ << " sar_width=" << sar_width
+ << " sar_height=" << sar_height;
+ return false;
+ }
+
+ // TODO(damienv): a MAP unit can be either 16 or 32 pixels.
+ // although it's 16 pixels for progressive non MBAFF frames.
+ gfx::Size coded_size((pic_width_in_mbs_minus1 + 1) * 16,
+ (pic_height_in_map_units_minus1 + 1) * 16);
+ gfx::Rect visible_rect(
+ frame_crop_left_offset,
+ frame_crop_top_offset,
+ (coded_size.width() - frame_crop_right_offset) - frame_crop_left_offset,
+ (coded_size.height() - frame_crop_bottom_offset) - frame_crop_top_offset);
+
+ // TODO(damienv): calculate the natural size based
+ // on the possible aspect ratio coded in the VUI parameters.
+ gfx::Size natural_size(visible_rect.width(),
+ visible_rect.height());
+
+ // TODO(damienv):
+ // Assuming the SPS is used right away by the PPS
+ // and the slice headers is a strong assumption.
+ // In theory, we should process the SPS and PPS
+ // and only when one of the slice header is switching
+ // the PPS id, the video decoder config should be changed.
+ VideoDecoderConfig video_decoder_config(
+ kCodecH264,
+ VIDEO_CODEC_PROFILE_UNKNOWN, // TODO(damienv)
+ VideoFrame::YV12,
+ coded_size,
+ visible_rect,
+ natural_size,
+ NULL, 0,
+ false);
+
+ if (!video_decoder_config.Matches(last_video_decoder_config_)) {
+ DVLOG(1) << "Profile IDC: " << profile_idc;
+ DVLOG(1) << "Level IDC: " << level_idc;
+ DVLOG(1) << "Pic width: " << (pic_width_in_mbs_minus1 + 1) * 16;
+ DVLOG(1) << "Pic height: " << (pic_height_in_map_units_minus1 + 1) * 16;
+ DVLOG(1) << "log2_max_frame_num_minus4: " << log2_max_frame_num_minus4;
+ last_video_decoder_config_ = video_decoder_config;
+ new_video_config_cb_.Run(video_decoder_config);
+ }
+
+ return true;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/es_parser_h264.h b/chromium/media/mp2t/es_parser_h264.h
new file mode 100644
index 00000000000..5cb247e8961
--- /dev/null
+++ b/chromium/media/mp2t/es_parser_h264.h
@@ -0,0 +1,97 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_ES_PARSER_H264_H_
+#define MEDIA_MP2T_ES_PARSER_H264_H_
+
+#include <list>
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/time/time.h"
+#include "media/base/byte_queue.h"
+#include "media/base/video_decoder_config.h"
+#include "media/mp2t/es_parser.h"
+
+namespace media {
+class BitReader;
+class StreamParserBuffer;
+}
+
+namespace media {
+namespace mp2t {
+
+// Remark:
+// In this h264 parser, frame splitting is based on AUD nals.
+// Mpeg2 TS spec: "2.14 Carriage of Rec. ITU-T H.264 | ISO/IEC 14496-10 video"
+// "Each AVC access unit shall contain an access unit delimiter NAL Unit;"
+//
+class EsParserH264 : public EsParser {
+ public:
+ typedef base::Callback<void(const VideoDecoderConfig&)> NewVideoConfigCB;
+
+ EsParserH264(const NewVideoConfigCB& new_video_config_cb,
+ const EmitBufferCB& emit_buffer_cb);
+ virtual ~EsParserH264();
+
+ // EsParser implementation.
+ virtual bool Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ private:
+ struct TimingDesc {
+ base::TimeDelta dts;
+ base::TimeDelta pts;
+ };
+
+ // H264 parser.
+ // It resumes parsing from byte position |es_pos_|.
+ bool ParseInternal();
+
+ // Emit a frame if a frame has been started earlier.
+ void EmitFrameIfNeeded(int next_aud_pos);
+
+ // Start a new frame.
+ // Note: if aud_pos < 0, clear the current frame.
+ void StartFrame(int aud_pos);
+
+ // Discard |nbytes| of ES from the ES byte queue.
+ void DiscardEs(int nbytes);
+
+ // Parse a NAL / SPS.
+ // Returns true if successful (compliant bitstream).
+ bool NalParser(const uint8* buf, int size);
+ bool ProcessSPS(const uint8* buf, int size);
+
+ // Callbacks to pass the stream configuration and the frames.
+ NewVideoConfigCB new_video_config_cb_;
+ EmitBufferCB emit_buffer_cb_;
+
+ // Bytes of the ES stream that have not been emitted yet.
+ ByteQueue es_byte_queue_;
+ std::list<std::pair<int, TimingDesc> > timing_desc_list_;
+
+ // H264 parser state.
+ // Note: |current_access_unit_pos_| is pointing to an annexB syncword
+ // while |current_nal_pos_| is pointing to the NAL unit
+ // (i.e. does not include the annexB syncword).
+ int es_pos_;
+ int current_nal_pos_;
+ int current_access_unit_pos_;
+ bool is_key_frame_;
+
+ // Last video decoder config.
+ VideoDecoderConfig last_video_decoder_config_;
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif  // MEDIA_MP2T_ES_PARSER_H264_H_
+
diff --git a/chromium/media/mp2t/mp2t_common.h b/chromium/media/mp2t/mp2t_common.h
new file mode 100644
index 00000000000..7bc8d7b3247
--- /dev/null
+++ b/chromium/media/mp2t/mp2t_common.h
@@ -0,0 +1,21 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_MP2T_COMMON_H_
+#define MEDIA_MP2T_MP2T_COMMON_H_
+
+#define LOG_LEVEL_TS 5
+#define LOG_LEVEL_PES 4
+#define LOG_LEVEL_ES 3
+
+#define RCHECK(x) \
+ do { \
+ if (!(x)) { \
+ DLOG(WARNING) << "Failure while parsing Mpeg2TS: " << #x; \
+ return false; \
+ } \
+ } while (0)
+
+#endif  // MEDIA_MP2T_MP2T_COMMON_H_
+
diff --git a/chromium/media/mp2t/mp2t_stream_parser.cc b/chromium/media/mp2t/mp2t_stream_parser.cc
new file mode 100644
index 00000000000..68fca5cedd2
--- /dev/null
+++ b/chromium/media/mp2t/mp2t_stream_parser.cc
@@ -0,0 +1,616 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/mp2t_stream_parser.h"
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_decoder_config.h"
+#include "media/mp2t/es_parser.h"
+#include "media/mp2t/es_parser_adts.h"
+#include "media/mp2t/es_parser_h264.h"
+#include "media/mp2t/mp2t_common.h"
+#include "media/mp2t/ts_packet.h"
+#include "media/mp2t/ts_section.h"
+#include "media/mp2t/ts_section_pat.h"
+#include "media/mp2t/ts_section_pes.h"
+#include "media/mp2t/ts_section_pmt.h"
+
+namespace media {
+namespace mp2t {
+
+enum StreamType {
+ // ISO-13818.1 / ITU H.222 Table 2.34 "Stream type assignments"
+ kStreamTypeMpeg1Audio = 0x3,
+ kStreamTypeAAC = 0xf,
+ kStreamTypeAVC = 0x1b,
+};
+
+class PidState {
+ public:
+ enum PidType {
+ kPidPat,
+ kPidPmt,
+ kPidAudioPes,
+ kPidVideoPes,
+ };
+
+  PidState(int pid, PidType pid_type,
+ scoped_ptr<TsSection> section_parser);
+
+ // Extract the content of the TS packet and parse it.
+ // Return true if successful.
+ bool PushTsPacket(const TsPacket& ts_packet);
+
+ // Flush the PID state (possibly emitting some pending frames)
+ // and reset its state.
+ void Flush();
+
+ // Enable/disable the PID.
+ // Disabling a PID will reset its state and ignore any further incoming TS
+ // packets.
+ void Enable();
+ void Disable();
+ bool IsEnabled() const;
+
+ PidType pid_type() const { return pid_type_; }
+
+ private:
+ void ResetState();
+
+ int pid_;
+ PidType pid_type_;
+ scoped_ptr<TsSection> section_parser_;
+
+ bool enable_;
+
+ int continuity_counter_;
+};
+
+PidState::PidState(int pid, PidType pid_type,
+ scoped_ptr<TsSection> section_parser)
+ : pid_(pid),
+ pid_type_(pid_type),
+ section_parser_(section_parser.Pass()),
+ enable_(false),
+ continuity_counter_(-1) {
+ DCHECK(section_parser_);
+}
+
+bool PidState::PushTsPacket(const TsPacket& ts_packet) {
+ DCHECK_EQ(ts_packet.pid(), pid_);
+
+ // The current PID is not part of the PID filter,
+ // just discard the incoming TS packet.
+ if (!enable_)
+ return true;
+
+ int expected_continuity_counter = (continuity_counter_ + 1) % 16;
+ if (continuity_counter_ >= 0 &&
+ ts_packet.continuity_counter() != expected_continuity_counter) {
+ DVLOG(1) << "TS discontinuity detected for pid: " << pid_;
+ return false;
+ }
+
+ bool status = section_parser_->Parse(
+ ts_packet.payload_unit_start_indicator(),
+ ts_packet.payload(),
+ ts_packet.payload_size());
+
+ // At the minimum, when parsing failed, auto reset the section parser.
+ // Components that use the StreamParser can take further action if needed.
+ if (!status) {
+ DVLOG(1) << "Parsing failed for pid = " << pid_;
+ ResetState();
+ }
+
+ return status;
+}
+
+void PidState::Flush() {
+ section_parser_->Flush();
+ ResetState();
+}
+
+void PidState::Enable() {
+ enable_ = true;
+}
+
+void PidState::Disable() {
+ if (!enable_)
+ return;
+
+ ResetState();
+ enable_ = false;
+}
+
+bool PidState::IsEnabled() const {
+ return enable_;
+}
+
+void PidState::ResetState() {
+ section_parser_->Reset();
+ continuity_counter_ = -1;
+}
+
+Mp2tStreamParser::BufferQueueWithConfig::BufferQueueWithConfig(
+ bool is_cfg_sent,
+ const AudioDecoderConfig& audio_cfg,
+ const VideoDecoderConfig& video_cfg)
+ : is_config_sent(is_cfg_sent),
+ audio_config(audio_cfg),
+ video_config(video_cfg) {
+}
+
+Mp2tStreamParser::BufferQueueWithConfig::~BufferQueueWithConfig() {
+}
+
+Mp2tStreamParser::Mp2tStreamParser()
+ : selected_audio_pid_(-1),
+ selected_video_pid_(-1),
+ is_initialized_(false),
+ segment_started_(false),
+ first_video_frame_in_segment_(true) {
+}
+
+Mp2tStreamParser::~Mp2tStreamParser() {
+ STLDeleteValues(&pids_);
+}
+
+void Mp2tStreamParser::Init(
+ const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
+ DCHECK(!is_initialized_);
+ DCHECK(init_cb_.is_null());
+ DCHECK(!init_cb.is_null());
+ DCHECK(!config_cb.is_null());
+ DCHECK(!new_buffers_cb.is_null());
+ DCHECK(!need_key_cb.is_null());
+ DCHECK(!end_of_segment_cb.is_null());
+
+ init_cb_ = init_cb;
+ config_cb_ = config_cb;
+ new_buffers_cb_ = new_buffers_cb;
+ need_key_cb_ = need_key_cb;
+ new_segment_cb_ = new_segment_cb;
+ end_of_segment_cb_ = end_of_segment_cb;
+ log_cb_ = log_cb;
+}
+
+void Mp2tStreamParser::Flush() {
+ DVLOG(1) << "Mp2tStreamParser::Flush";
+
+ // Flush the buffers and reset the pids.
+ for (std::map<int, PidState*>::iterator it = pids_.begin();
+ it != pids_.end(); ++it) {
+ DVLOG(1) << "Flushing PID: " << it->first;
+ PidState* pid_state = it->second;
+ pid_state->Flush();
+ delete pid_state;
+ }
+ pids_.clear();
+ EmitRemainingBuffers();
+ buffer_queue_chain_.clear();
+
+ // End of the segment.
+ // Note: does not need to invoke |end_of_segment_cb_| since flushing the
+ // stream parser already involves the end of the current segment.
+ segment_started_ = false;
+ first_video_frame_in_segment_ = true;
+
+ // Remove any bytes left in the TS buffer.
+ // (i.e. any partial TS packet => less than 188 bytes).
+ ts_byte_queue_.Reset();
+
+ // Reset the selected PIDs.
+ selected_audio_pid_ = -1;
+ selected_video_pid_ = -1;
+}
+
+bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
+ DVLOG(1) << "Mp2tStreamParser::Parse size=" << size;
+
+ // Add the data to the parser state.
+ ts_byte_queue_.Push(buf, size);
+
+ while (true) {
+ const uint8* ts_buffer;
+ int ts_buffer_size;
+ ts_byte_queue_.Peek(&ts_buffer, &ts_buffer_size);
+ if (ts_buffer_size < TsPacket::kPacketSize)
+ break;
+
+ // Synchronization.
+ int skipped_bytes = TsPacket::Sync(ts_buffer, ts_buffer_size);
+ if (skipped_bytes > 0) {
+ DVLOG(1) << "Packet not aligned on a TS syncword:"
+ << " skipped_bytes=" << skipped_bytes;
+ ts_byte_queue_.Pop(skipped_bytes);
+ continue;
+ }
+
+ // Parse the TS header, skipping 1 byte if the header is invalid.
+ scoped_ptr<TsPacket> ts_packet(TsPacket::Parse(ts_buffer, ts_buffer_size));
+ if (!ts_packet) {
+ DVLOG(1) << "Error: invalid TS packet";
+ ts_byte_queue_.Pop(1);
+ continue;
+ }
+ DVLOG(LOG_LEVEL_TS)
+ << "Processing PID=" << ts_packet->pid()
+ << " start_unit=" << ts_packet->payload_unit_start_indicator();
+
+ // Parse the section.
+ std::map<int, PidState*>::iterator it = pids_.find(ts_packet->pid());
+ if (it == pids_.end() &&
+ ts_packet->pid() == TsSection::kPidPat) {
+ // Create the PAT state here if needed.
+ scoped_ptr<TsSection> pat_section_parser(
+ new TsSectionPat(
+ base::Bind(&Mp2tStreamParser::RegisterPmt,
+ base::Unretained(this))));
+ scoped_ptr<PidState> pat_pid_state(
+ new PidState(ts_packet->pid(), PidState::kPidPat,
+ pat_section_parser.Pass()));
+ pat_pid_state->Enable();
+ it = pids_.insert(
+ std::pair<int, PidState*>(ts_packet->pid(),
+ pat_pid_state.release())).first;
+ }
+
+ if (it != pids_.end()) {
+ if (!it->second->PushTsPacket(*ts_packet))
+ return false;
+ } else {
+ DVLOG(LOG_LEVEL_TS) << "Ignoring TS packet for pid: " << ts_packet->pid();
+ }
+
+ // Go to the next packet.
+ ts_byte_queue_.Pop(TsPacket::kPacketSize);
+ }
+
+ RCHECK(FinishInitializationIfNeeded());
+
+ // Emit the A/V buffers that kept accumulating during TS parsing.
+ return EmitRemainingBuffers();
+}
+
+void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
+ DVLOG(1) << "RegisterPmt:"
+ << " program_number=" << program_number
+ << " pmt_pid=" << pmt_pid;
+
+ // Only one TS program is allowed. Ignore the incoming program map table,
+ // if there is already one registered.
+ for (std::map<int, PidState*>::iterator it = pids_.begin();
+ it != pids_.end(); ++it) {
+ PidState* pid_state = it->second;
+ if (pid_state->pid_type() == PidState::kPidPmt) {
+ DVLOG_IF(1, pmt_pid != it->first) << "More than one program is defined";
+ return;
+ }
+ }
+
+ // Create the PMT state here if needed.
+ DVLOG(1) << "Create a new PMT parser";
+ scoped_ptr<TsSection> pmt_section_parser(
+ new TsSectionPmt(
+ base::Bind(&Mp2tStreamParser::RegisterPes,
+ base::Unretained(this), pmt_pid)));
+ scoped_ptr<PidState> pmt_pid_state(
+ new PidState(pmt_pid, PidState::kPidPmt, pmt_section_parser.Pass()));
+ pmt_pid_state->Enable();
+ pids_.insert(std::pair<int, PidState*>(pmt_pid, pmt_pid_state.release()));
+}
+
+void Mp2tStreamParser::RegisterPes(int pmt_pid,
+ int pes_pid,
+ int stream_type) {
+ // TODO(damienv): check there is no mismatch if the entry already exists.
+ DVLOG(1) << "RegisterPes:"
+ << " pes_pid=" << pes_pid
+ << " stream_type=" << std::hex << stream_type << std::dec;
+ std::map<int, PidState*>::iterator it = pids_.find(pes_pid);
+ if (it != pids_.end())
+ return;
+
+ // Create a stream parser corresponding to the stream type.
+ bool is_audio = false;
+ scoped_ptr<EsParser> es_parser;
+ if (stream_type == kStreamTypeAVC) {
+ es_parser.reset(
+ new EsParserH264(
+ base::Bind(&Mp2tStreamParser::OnVideoConfigChanged,
+ base::Unretained(this),
+ pes_pid),
+ base::Bind(&Mp2tStreamParser::OnEmitVideoBuffer,
+ base::Unretained(this),
+ pes_pid)));
+ } else if (stream_type == kStreamTypeAAC) {
+ es_parser.reset(
+ new EsParserAdts(
+ base::Bind(&Mp2tStreamParser::OnAudioConfigChanged,
+ base::Unretained(this),
+ pes_pid),
+ base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer,
+ base::Unretained(this),
+ pes_pid)));
+ is_audio = true;
+ } else {
+ return;
+ }
+
+ // Create the PES state here.
+ DVLOG(1) << "Create a new PES state";
+ scoped_ptr<TsSection> pes_section_parser(
+ new TsSectionPes(es_parser.Pass()));
+ PidState::PidType pid_type =
+ is_audio ? PidState::kPidAudioPes : PidState::kPidVideoPes;
+ scoped_ptr<PidState> pes_pid_state(
+ new PidState(pes_pid, pid_type, pes_section_parser.Pass()));
+ pids_.insert(std::pair<int, PidState*>(pes_pid, pes_pid_state.release()));
+
+ // A new PES pid has been added, the PID filter might change.
+ UpdatePidFilter();
+}
+
+void Mp2tStreamParser::UpdatePidFilter() {
+ // Applies the HLS rule to select the default audio/video PIDs:
+ // select the audio/video streams with the lowest PID.
+ // TODO(damienv): this can be changed when the StreamParser interface
+ // supports multiple audio/video streams.
+ PidMap::iterator lowest_audio_pid = pids_.end();
+ PidMap::iterator lowest_video_pid = pids_.end();
+ for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) {
+ int pid = it->first;
+ PidState* pid_state = it->second;
+ if (pid_state->pid_type() == PidState::kPidAudioPes &&
+ (lowest_audio_pid == pids_.end() || pid < lowest_audio_pid->first))
+ lowest_audio_pid = it;
+ if (pid_state->pid_type() == PidState::kPidVideoPes &&
+ (lowest_video_pid == pids_.end() || pid < lowest_video_pid->first))
+ lowest_video_pid = it;
+ }
+
+ // Enable both the lowest audio and video PIDs.
+ if (lowest_audio_pid != pids_.end()) {
+ DVLOG(1) << "Enable audio pid: " << lowest_audio_pid->first;
+ lowest_audio_pid->second->Enable();
+ selected_audio_pid_ = lowest_audio_pid->first;
+ }
+ if (lowest_video_pid != pids_.end()) {
+    DVLOG(1) << "Enable video pid: " << lowest_video_pid->first;
+ lowest_video_pid->second->Enable();
+ selected_video_pid_ = lowest_video_pid->first;
+ }
+
+ // Disable all the other audio and video PIDs.
+ for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) {
+ PidState* pid_state = it->second;
+ if (it != lowest_audio_pid && it != lowest_video_pid &&
+ (pid_state->pid_type() == PidState::kPidAudioPes ||
+ pid_state->pid_type() == PidState::kPidVideoPes))
+ pid_state->Disable();
+ }
+}
+
+void Mp2tStreamParser::OnVideoConfigChanged(
+ int pes_pid,
+ const VideoDecoderConfig& video_decoder_config) {
+ DVLOG(1) << "OnVideoConfigChanged for pid=" << pes_pid;
+ DCHECK_EQ(pes_pid, selected_video_pid_);
+ DCHECK(video_decoder_config.IsValidConfig());
+
+ // Create a new entry in |buffer_queue_chain_| with the updated configs.
+ BufferQueueWithConfig buffer_queue_with_config(
+ false,
+ buffer_queue_chain_.empty()
+ ? AudioDecoderConfig() : buffer_queue_chain_.back().audio_config,
+ video_decoder_config);
+ buffer_queue_chain_.push_back(buffer_queue_with_config);
+
+ // Replace any non valid config with the 1st valid entry.
+ // This might happen if there was no available config before.
+ for (std::list<BufferQueueWithConfig>::iterator it =
+ buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
+ if (it->video_config.IsValidConfig())
+ break;
+ it->video_config = video_decoder_config;
+ }
+}
+
+void Mp2tStreamParser::OnAudioConfigChanged(
+ int pes_pid,
+ const AudioDecoderConfig& audio_decoder_config) {
+ DVLOG(1) << "OnAudioConfigChanged for pid=" << pes_pid;
+ DCHECK_EQ(pes_pid, selected_audio_pid_);
+ DCHECK(audio_decoder_config.IsValidConfig());
+
+ // Create a new entry in |buffer_queue_chain_| with the updated configs.
+ BufferQueueWithConfig buffer_queue_with_config(
+ false,
+ audio_decoder_config,
+ buffer_queue_chain_.empty()
+ ? VideoDecoderConfig() : buffer_queue_chain_.back().video_config);
+ buffer_queue_chain_.push_back(buffer_queue_with_config);
+
+ // Replace any non valid config with the 1st valid entry.
+ // This might happen if there was no available config before.
+ for (std::list<BufferQueueWithConfig>::iterator it =
+ buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
+ if (it->audio_config.IsValidConfig())
+ break;
+ it->audio_config = audio_decoder_config;
+ }
+}
+
+bool Mp2tStreamParser::FinishInitializationIfNeeded() {
+ // Nothing to be done if already initialized.
+ if (is_initialized_)
+ return true;
+
+ // Wait for more data to come to finish initialization.
+ if (buffer_queue_chain_.empty())
+ return true;
+
+ // Wait for more data to come if one of the config is not available.
+ BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
+ if (selected_audio_pid_ > 0 &&
+ !queue_with_config.audio_config.IsValidConfig())
+ return true;
+ if (selected_video_pid_ > 0 &&
+ !queue_with_config.video_config.IsValidConfig())
+ return true;
+
+ // Pass the config before invoking the initialization callback.
+ RCHECK(config_cb_.Run(queue_with_config.audio_config,
+ queue_with_config.video_config));
+ queue_with_config.is_config_sent = true;
+
+ // For Mpeg2 TS, the duration is not known.
+ DVLOG(1) << "Mpeg2TS stream parser initialization done";
+ init_cb_.Run(true, kInfiniteDuration());
+ is_initialized_ = true;
+
+ return true;
+}
+
+void Mp2tStreamParser::OnEmitAudioBuffer(
+ int pes_pid,
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer) {
+ DCHECK_EQ(pes_pid, selected_audio_pid_);
+
+ DVLOG(LOG_LEVEL_ES)
+ << "OnEmitAudioBuffer: "
+ << " size="
+ << stream_parser_buffer->data_size()
+ << " dts="
+ << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
+ << " pts="
+ << stream_parser_buffer->timestamp().InMilliseconds();
+ stream_parser_buffer->set_timestamp(
+ stream_parser_buffer->timestamp() - time_offset_);
+ stream_parser_buffer->SetDecodeTimestamp(
+ stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
+
+ // Ignore the incoming buffer if it is not associated with any config.
+ if (buffer_queue_chain_.empty()) {
+ DVLOG(1) << "Ignoring audio buffer with no corresponding audio config";
+ return;
+ }
+
+ buffer_queue_chain_.back().audio_queue.push_back(stream_parser_buffer);
+}
+
+void Mp2tStreamParser::OnEmitVideoBuffer(
+ int pes_pid,
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer) {
+ DCHECK_EQ(pes_pid, selected_video_pid_);
+
+ DVLOG(LOG_LEVEL_ES)
+ << "OnEmitVideoBuffer"
+ << " size="
+ << stream_parser_buffer->data_size()
+ << " dts="
+ << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
+ << " pts="
+ << stream_parser_buffer->timestamp().InMilliseconds()
+ << " IsKeyframe="
+ << stream_parser_buffer->IsKeyframe();
+ stream_parser_buffer->set_timestamp(
+ stream_parser_buffer->timestamp() - time_offset_);
+ stream_parser_buffer->SetDecodeTimestamp(
+ stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
+
+ // Ignore the incoming buffer if it is not associated with any config.
+ if (buffer_queue_chain_.empty()) {
+ DVLOG(1) << "Ignoring video buffer with no corresponding video config:"
+ << " keyframe=" << stream_parser_buffer->IsKeyframe()
+ << " dts="
+ << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
+ return;
+ }
+
+ // A segment cannot start with a non key frame.
+ // Ignore the frame if that's the case.
+ if (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe()) {
+ DVLOG(1) << "Ignoring non-key frame:"
+ << " dts="
+ << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
+ return;
+ }
+
+ first_video_frame_in_segment_ = false;
+ buffer_queue_chain_.back().video_queue.push_back(stream_parser_buffer);
+}
+
+bool Mp2tStreamParser::EmitRemainingBuffers() {
+ DVLOG(LOG_LEVEL_ES) << "Mp2tStreamParser::EmitRemainingBuffers";
+
+ // No buffer should be sent until fully initialized.
+ if (!is_initialized_)
+ return true;
+
+ if (buffer_queue_chain_.empty())
+ return true;
+
+ // Keep track of the last audio and video config sent.
+ AudioDecoderConfig last_audio_config =
+ buffer_queue_chain_.back().audio_config;
+ VideoDecoderConfig last_video_config =
+ buffer_queue_chain_.back().video_config;
+
+ // Buffer emission.
+ while (!buffer_queue_chain_.empty()) {
+ // Start a segment if needed.
+ if (!segment_started_) {
+ DVLOG(1) << "Starting a new segment";
+ segment_started_ = true;
+ new_segment_cb_.Run();
+ }
+
+ // Update the audio and video config if needed.
+ BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
+ if (!queue_with_config.is_config_sent) {
+ if (!config_cb_.Run(queue_with_config.audio_config,
+ queue_with_config.video_config))
+ return false;
+ queue_with_config.is_config_sent = true;
+ }
+
+ // Add buffers.
+ if (!queue_with_config.audio_queue.empty() ||
+ !queue_with_config.video_queue.empty()) {
+ if (!new_buffers_cb_.Run(queue_with_config.audio_queue,
+ queue_with_config.video_queue)) {
+ return false;
+ }
+ }
+
+ buffer_queue_chain_.pop_front();
+ }
+
+ // Push an empty queue with the last audio/video config
+ // so that buffers with the same config can be added later on.
+ BufferQueueWithConfig queue_with_config(
+ true, last_audio_config, last_video_config);
+ buffer_queue_chain_.push_back(queue_with_config);
+
+ return true;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/mp2t_stream_parser.h b/chromium/media/mp2t/mp2t_stream_parser.h
new file mode 100644
index 00000000000..dcab5595ff8
--- /dev/null
+++ b/chromium/media/mp2t/mp2t_stream_parser.h
@@ -0,0 +1,133 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_MP2T_STREAM_PARSER_H_
+#define MEDIA_MP2T_MP2T_STREAM_PARSER_H_
+
+#include <list>
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/byte_queue.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+#include "media/base/video_decoder_config.h"
+
+namespace media {
+
+class StreamParserBuffer;
+
+namespace mp2t {
+
+class PidState;
+
+class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
+ public:
+ Mp2tStreamParser();
+ virtual ~Mp2tStreamParser();
+
+ // StreamParser implementation.
+ virtual void Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual bool Parse(const uint8* buf, int size) OVERRIDE;
+
+ private:
+ typedef std::map<int, PidState*> PidMap;
+
+ struct BufferQueueWithConfig {
+ BufferQueueWithConfig(bool is_cfg_sent,
+ const AudioDecoderConfig& audio_cfg,
+ const VideoDecoderConfig& video_cfg);
+ ~BufferQueueWithConfig();
+
+ bool is_config_sent;
+ AudioDecoderConfig audio_config;
+ StreamParser::BufferQueue audio_queue;
+ VideoDecoderConfig video_config;
+ StreamParser::BufferQueue video_queue;
+ };
+
+ // Callback invoked to register a Program Map Table.
+ // Note: Does nothing if the PID is already registered.
+ void RegisterPmt(int program_number, int pmt_pid);
+
+ // Callback invoked to register a PES pid.
+ // Possible values for |stream_type| are defined in:
+ // ISO-13818.1 / ITU H.222 Table 2.34 "Stream type assignments".
+  // |pes_pid| is part of the Program Map Table referred to by |pmt_pid|.
+ void RegisterPes(int pmt_pid, int pes_pid, int stream_type);
+
+ // Since the StreamParser interface allows only one audio & video streams,
+ // an automatic PID filtering should be applied to select the audio & video
+ // streams.
+ void UpdatePidFilter();
+
+ // Callback invoked each time the audio/video decoder configuration is
+ // changed.
+ void OnVideoConfigChanged(int pes_pid,
+ const VideoDecoderConfig& video_decoder_config);
+ void OnAudioConfigChanged(int pes_pid,
+ const AudioDecoderConfig& audio_decoder_config);
+
+ // Invoke the initialization callback if needed.
+ bool FinishInitializationIfNeeded();
+
+ // Callback invoked by the ES stream parser
+ // to emit a new audio/video access unit.
+ void OnEmitAudioBuffer(
+ int pes_pid,
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer);
+ void OnEmitVideoBuffer(
+ int pes_pid,
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer);
+ bool EmitRemainingBuffers();
+
+ // List of callbacks.
+ InitCB init_cb_;
+ NewConfigCB config_cb_;
+ NewBuffersCB new_buffers_cb_;
+ NeedKeyCB need_key_cb_;
+ NewMediaSegmentCB new_segment_cb_;
+ base::Closure end_of_segment_cb_;
+ LogCB log_cb_;
+
+ // Bytes of the TS stream.
+ ByteQueue ts_byte_queue_;
+
+ // List of PIDs and their state.
+ PidMap pids_;
+
+ // Selected audio and video PIDs.
+ int selected_audio_pid_;
+ int selected_video_pid_;
+
+ // Pending audio & video buffers.
+ std::list<BufferQueueWithConfig> buffer_queue_chain_;
+
+ // Whether |init_cb_| has been invoked.
+ bool is_initialized_;
+
+ // Indicate whether a segment was started.
+ bool segment_started_;
+ bool first_video_frame_in_segment_;
+ base::TimeDelta time_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mp2tStreamParser);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif  // MEDIA_MP2T_MP2T_STREAM_PARSER_H_
+
diff --git a/chromium/media/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
new file mode 100644
index 00000000000..12a3b9519da
--- /dev/null
+++ b/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <string>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/test_data_util.h"
+#include "media/base/video_decoder_config.h"
+#include "media/mp2t/mp2t_stream_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace mp2t {
+
+class Mp2tStreamParserTest : public testing::Test {
+ public:
+ Mp2tStreamParserTest()
+ : audio_frame_count_(0),
+ video_frame_count_(0),
+ video_min_dts_(kNoTimestamp()),
+ video_max_dts_(kNoTimestamp()) {
+ parser_.reset(new Mp2tStreamParser());
+ }
+
+ protected:
+ scoped_ptr<Mp2tStreamParser> parser_;
+ int audio_frame_count_;
+ int video_frame_count_;
+ base::TimeDelta video_min_dts_;
+ base::TimeDelta video_max_dts_;
+
+ bool AppendData(const uint8* data, size_t length) {
+ return parser_->Parse(data, length);
+ }
+
+ bool AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
+ const uint8* start = data;
+ const uint8* end = data + length;
+ while (start < end) {
+ size_t append_size = std::min(piece_size,
+ static_cast<size_t>(end - start));
+ if (!AppendData(start, append_size))
+ return false;
+ start += append_size;
+ }
+ return true;
+ }
+
+ void OnInit(bool init_ok, base::TimeDelta duration) {
+ DVLOG(1) << "OnInit: ok=" << init_ok
+ << ", dur=" << duration.InMilliseconds();
+ }
+
+ bool OnNewConfig(const AudioDecoderConfig& ac, const VideoDecoderConfig& vc) {
+ DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
+ << ", video=" << vc.IsValidConfig();
+ return true;
+ }
+
+
+ void DumpBuffers(const std::string& label,
+ const StreamParser::BufferQueue& buffers) {
+ DVLOG(2) << "DumpBuffers: " << label << " size " << buffers.size();
+ for (StreamParser::BufferQueue::const_iterator buf = buffers.begin();
+ buf != buffers.end(); buf++) {
+ DVLOG(3) << " n=" << buf - buffers.begin()
+ << ", size=" << (*buf)->data_size()
+ << ", dur=" << (*buf)->duration().InMilliseconds();
+ }
+ }
+
+ bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers) {
+ DumpBuffers("audio_buffers", audio_buffers);
+ DumpBuffers("video_buffers", video_buffers);
+ audio_frame_count_ += audio_buffers.size();
+ video_frame_count_ += video_buffers.size();
+
+ if (video_min_dts_ == kNoTimestamp() && !video_buffers.empty())
+ video_min_dts_ = video_buffers.front()->GetDecodeTimestamp();
+ if (!video_buffers.empty()) {
+ video_max_dts_ = video_buffers.back()->GetDecodeTimestamp();
+ // Verify monotonicity.
+ StreamParser::BufferQueue::const_iterator it1 = video_buffers.begin();
+      StreamParser::BufferQueue::const_iterator it2 = it1;
+      for (++it2; it2 != video_buffers.end(); ++it1, ++it2) {
+ if ((*it2)->GetDecodeTimestamp() < (*it1)->GetDecodeTimestamp())
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool OnNewTextBuffers(TextTrack* text_track,
+ const StreamParser::BufferQueue& buffers) {
+ return true;
+ }
+
+ void OnKeyNeeded(const std::string& type,
+ const std::vector<uint8>& init_data) {
+ DVLOG(1) << "OnKeyNeeded: " << init_data.size();
+ }
+
+ scoped_ptr<TextTrack> OnAddTextTrack(
+ TextKind kind,
+ const std::string& label,
+ const std::string& language) {
+ return scoped_ptr<TextTrack>();
+ }
+
+ void OnNewSegment() {
+ DVLOG(1) << "OnNewSegment";
+ }
+
+ void OnEndOfSegment() {
+ DVLOG(1) << "OnEndOfSegment()";
+ }
+
+ void InitializeParser() {
+ parser_->Init(
+ base::Bind(&Mp2tStreamParserTest::OnInit,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewConfig,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewBuffers,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewTextBuffers,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnAddTextTrack,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewSegment,
+ base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnEndOfSegment,
+ base::Unretained(this)),
+ LogCB());
+ }
+
+ bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) {
+ InitializeParser();
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
+ EXPECT_TRUE(AppendDataInPieces(buffer->data(),
+ buffer->data_size(),
+ append_bytes));
+ return true;
+ }
+};
+
+TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
+ // Test small, non-segment-aligned appends.
+ ParseMpeg2TsFile("bear-1280x720.ts", 17);
+ EXPECT_EQ(video_frame_count_, 81);
+ parser_->Flush();
+ EXPECT_EQ(video_frame_count_, 82);
+}
+
+TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
+ // Test small, non-segment-aligned appends.
+ ParseMpeg2TsFile("bear-1280x720.ts", 512);
+ EXPECT_EQ(video_frame_count_, 81);
+ parser_->Flush();
+ EXPECT_EQ(video_frame_count_, 82);
+}
+
+TEST_F(Mp2tStreamParserTest, TimestampWrapAround) {
+ // "bear-1280x720_ptswraparound.ts" has been transcoded
+ // from bear-1280x720.mp4 by applying a time offset of 95442s
+ // (close to 2^33 / 90000) which results in timestamps wrap around
+ // in the Mpeg2 TS stream.
+ ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
+ EXPECT_EQ(video_frame_count_, 81);
+ EXPECT_GE(video_min_dts_, base::TimeDelta::FromSeconds(95443 - 10));
+ EXPECT_LE(video_max_dts_, base::TimeDelta::FromSeconds(95443 + 10));
+}
+
+} // namespace mp2t
+} // namespace media
diff --git a/chromium/media/mp2t/ts_packet.cc b/chromium/media/mp2t/ts_packet.cc
new file mode 100644
index 00000000000..6b41e907501
--- /dev/null
+++ b/chromium/media/mp2t/ts_packet.cc
@@ -0,0 +1,215 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/ts_packet.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/bit_reader.h"
+#include "media/mp2t/mp2t_common.h"
+
+namespace media {
+namespace mp2t {
+
+static const uint8 kTsHeaderSyncword = 0x47;
+
+// static
+int TsPacket::Sync(const uint8* buf, int size) {
+ int k = 0;
+ for (; k < size; k++) {
+ // Verify that we have 4 syncwords in a row when possible,
+ // this should improve synchronization robustness.
+ // TODO(damienv): Consider the case where there is garbage
+ // between TS packets.
+ bool is_header = true;
+ for (int i = 0; i < 4; i++) {
+ int idx = k + i * kPacketSize;
+ if (idx >= size)
+ break;
+ if (buf[idx] != kTsHeaderSyncword) {
+ DVLOG(LOG_LEVEL_TS)
+ << "ByteSync" << idx << ": "
+ << std::hex << static_cast<int>(buf[idx]) << std::dec;
+ is_header = false;
+ break;
+ }
+ }
+ if (is_header)
+ break;
+ }
+
+ DVLOG_IF(1, k != 0) << "SYNC: nbytes_skipped=" << k;
+ return k;
+}
+
+// static
+TsPacket* TsPacket::Parse(const uint8* buf, int size) {
+ if (size < kPacketSize) {
+ DVLOG(1) << "Buffer does not hold one full TS packet:"
+ << " buffer_size=" << size;
+ return NULL;
+ }
+
+ DCHECK_EQ(buf[0], kTsHeaderSyncword);
+ if (buf[0] != kTsHeaderSyncword) {
+ DVLOG(1) << "Not on a TS syncword:"
+ << " buf[0]="
+ << std::hex << static_cast<int>(buf[0]) << std::dec;
+ return NULL;
+ }
+
+ scoped_ptr<TsPacket> ts_packet(new TsPacket());
+ bool status = ts_packet->ParseHeader(buf);
+ if (!status) {
+ DVLOG(1) << "Parsing header failed";
+ return NULL;
+ }
+ return ts_packet.release();
+}
+
+TsPacket::TsPacket() {
+}
+
+TsPacket::~TsPacket() {
+}
+
+bool TsPacket::ParseHeader(const uint8* buf) {
+ BitReader bit_reader(buf, kPacketSize);
+ payload_ = buf;
+ payload_size_ = kPacketSize;
+
+ // Read the TS header: 4 bytes.
+ int syncword;
+ int transport_error_indicator;
+ int payload_unit_start_indicator;
+ int transport_priority;
+ int transport_scrambling_control;
+ int adaptation_field_control;
+ RCHECK(bit_reader.ReadBits(8, &syncword));
+ RCHECK(bit_reader.ReadBits(1, &transport_error_indicator));
+ RCHECK(bit_reader.ReadBits(1, &payload_unit_start_indicator));
+ RCHECK(bit_reader.ReadBits(1, &transport_priority));
+ RCHECK(bit_reader.ReadBits(13, &pid_));
+ RCHECK(bit_reader.ReadBits(2, &transport_scrambling_control));
+ RCHECK(bit_reader.ReadBits(2, &adaptation_field_control));
+ RCHECK(bit_reader.ReadBits(4, &continuity_counter_));
+ payload_unit_start_indicator_ = (payload_unit_start_indicator != 0);
+ payload_ += 4;
+ payload_size_ -= 4;
+
+ // Default values when no adaptation field.
+ discontinuity_indicator_ = false;
+ random_access_indicator_ = false;
+
+ // Done since no adaptation field.
+ if ((adaptation_field_control & 0x2) == 0)
+ return true;
+
+ // Read the adaptation field if needed.
+ int adaptation_field_length;
+ RCHECK(bit_reader.ReadBits(8, &adaptation_field_length));
+ DVLOG(LOG_LEVEL_TS) << "adaptation_field_length=" << adaptation_field_length;
+ payload_ += 1;
+ payload_size_ -= 1;
+ if ((adaptation_field_control & 0x1) == 0 &&
+ adaptation_field_length != 183) {
+ DVLOG(1) << "adaptation_field_length=" << adaptation_field_length;
+ return false;
+ }
+ if ((adaptation_field_control & 0x1) == 1 &&
+ adaptation_field_length > 182) {
+ DVLOG(1) << "adaptation_field_length=" << adaptation_field_length;
+ // This is not allowed by the spec.
+ // However, some badly encoded streams are using
+ // adaptation_field_length = 183
+ return false;
+ }
+
+ // adaptation_field_length = '0' is used to insert a single stuffing byte
+ // in the adaptation field of a transport stream packet.
+ if (adaptation_field_length == 0)
+ return true;
+
+ bool status = ParseAdaptationField(&bit_reader, adaptation_field_length);
+ payload_ += adaptation_field_length;
+ payload_size_ -= adaptation_field_length;
+ return status;
+}
+
+bool TsPacket::ParseAdaptationField(BitReader* bit_reader,
+ int adaptation_field_length) {
+ DCHECK_GT(adaptation_field_length, 0);
+ int adaptation_field_start_marker = bit_reader->bits_available() / 8;
+
+ int discontinuity_indicator;
+ int random_access_indicator;
+ int elementary_stream_priority_indicator;
+ int pcr_flag;
+ int opcr_flag;
+ int splicing_point_flag;
+ int transport_private_data_flag;
+ int adaptation_field_extension_flag;
+ RCHECK(bit_reader->ReadBits(1, &discontinuity_indicator));
+ RCHECK(bit_reader->ReadBits(1, &random_access_indicator));
+ RCHECK(bit_reader->ReadBits(1, &elementary_stream_priority_indicator));
+ RCHECK(bit_reader->ReadBits(1, &pcr_flag));
+ RCHECK(bit_reader->ReadBits(1, &opcr_flag));
+ RCHECK(bit_reader->ReadBits(1, &splicing_point_flag));
+ RCHECK(bit_reader->ReadBits(1, &transport_private_data_flag));
+ RCHECK(bit_reader->ReadBits(1, &adaptation_field_extension_flag));
+ discontinuity_indicator_ = (discontinuity_indicator != 0);
+ random_access_indicator_ = (random_access_indicator != 0);
+
+ if (pcr_flag) {
+ int64 program_clock_reference_base;
+ int reserved;
+ int program_clock_reference_extension;
+ RCHECK(bit_reader->ReadBits(33, &program_clock_reference_base));
+ RCHECK(bit_reader->ReadBits(6, &reserved));
+ RCHECK(bit_reader->ReadBits(9, &program_clock_reference_extension));
+ }
+
+ if (opcr_flag) {
+ int64 original_program_clock_reference_base;
+ int reserved;
+ int original_program_clock_reference_extension;
+ RCHECK(bit_reader->ReadBits(33, &original_program_clock_reference_base));
+ RCHECK(bit_reader->ReadBits(6, &reserved));
+ RCHECK(
+ bit_reader->ReadBits(9, &original_program_clock_reference_extension));
+ }
+
+ if (splicing_point_flag) {
+ int splice_countdown;
+ RCHECK(bit_reader->ReadBits(8, &splice_countdown));
+ }
+
+ if (transport_private_data_flag) {
+ int transport_private_data_length;
+ RCHECK(bit_reader->ReadBits(8, &transport_private_data_length));
+ RCHECK(bit_reader->SkipBits(8 * transport_private_data_length));
+ }
+
+ if (adaptation_field_extension_flag) {
+ int adaptation_field_extension_length;
+ RCHECK(bit_reader->ReadBits(8, &adaptation_field_extension_length));
+ RCHECK(bit_reader->SkipBits(8 * adaptation_field_extension_length));
+ }
+
+ // The rest of the adaptation field should be stuffing bytes.
+ int adaptation_field_remaining_size = adaptation_field_length -
+ (adaptation_field_start_marker - bit_reader->bits_available() / 8);
+ RCHECK(adaptation_field_remaining_size >= 0);
+ for (int k = 0; k < adaptation_field_remaining_size; k++) {
+ int stuffing_byte;
+ RCHECK(bit_reader->ReadBits(8, &stuffing_byte));
+ RCHECK(stuffing_byte == 0xff);
+ }
+
+ DVLOG(LOG_LEVEL_TS) << "random_access_indicator=" << random_access_indicator_;
+ return true;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/ts_packet.h b/chromium/media/mp2t/ts_packet.h
new file mode 100644
index 00000000000..f3537bc8fe2
--- /dev/null
+++ b/chromium/media/mp2t/ts_packet.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_PACKET_H_
+#define MEDIA_MP2T_TS_PACKET_H_
+
+#include "base/basictypes.h"
+
+namespace media {
+
+class BitReader;
+
+namespace mp2t {
+
+class TsPacket {
+ public:
+ static const int kPacketSize = 188;
+
+ // Return the number of bytes to discard
+ // to be synchronized on a TS syncword.
+ static int Sync(const uint8* buf, int size);
+
+ // Parse a TS packet.
+ // Return a TsPacket only when parsing was successful.
+ // Return NULL otherwise.
+ static TsPacket* Parse(const uint8* buf, int size);
+
+ ~TsPacket();
+
+ // TS header accessors.
+ bool payload_unit_start_indicator() const {
+ return payload_unit_start_indicator_;
+ }
+ int pid() const { return pid_; }
+ int continuity_counter() const { return continuity_counter_; }
+ bool discontinuity_indicator() const { return discontinuity_indicator_; }
+ bool random_access_indicator() const { return random_access_indicator_; }
+
+ // Return the offset and the size of the payload.
+ const uint8* payload() const { return payload_; }
+ int payload_size() const { return payload_size_; }
+
+ private:
+ TsPacket();
+
+ // Parse an Mpeg2 TS header.
+ // The buffer size should be at least |kPacketSize|
+ bool ParseHeader(const uint8* buf);
+ bool ParseAdaptationField(BitReader* bit_reader,
+ int adaptation_field_length);
+
+ // Size of the payload.
+ const uint8* payload_;
+ int payload_size_;
+
+ // TS header.
+ bool payload_unit_start_indicator_;
+ int pid_;
+ int continuity_counter_;
+
+ // Params from the adaptation field.
+ bool discontinuity_indicator_;
+ bool random_access_indicator_;
+
+ DISALLOW_COPY_AND_ASSIGN(TsPacket);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp2t/ts_section.h b/chromium/media/mp2t/ts_section.h
new file mode 100644
index 00000000000..1b7453f837d
--- /dev/null
+++ b/chromium/media/mp2t/ts_section.h
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_SECTION_H_
+#define MEDIA_MP2T_TS_SECTION_H_
+
+namespace media {
+namespace mp2t {
+
+class TsSection {
+ public:
+ // From ISO/IEC 13818-1 or ITU H.222 spec: Table 2-3 - PID table.
+ enum SpecialPid {
+ kPidPat = 0x0,
+ kPidCat = 0x1,
+ kPidTsdt = 0x2,
+ kPidNullPacket = 0x1fff,
+ kPidMax = 0x1fff,
+ };
+
+ virtual ~TsSection() {}
+
+ // Parse the data bytes of the TS packet.
+ // Return true if parsing is successful.
+ virtual bool Parse(bool payload_unit_start_indicator,
+ const uint8* buf, int size) = 0;
+
+ // Process bytes that have not been processed yet (pending buffers in the
+ // pipe). Flush might thus results in frame emission, as an example.
+ virtual void Flush() = 0;
+
+ // Reset the state of the parser to its initial state.
+ virtual void Reset() = 0;
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
diff --git a/chromium/media/mp2t/ts_section_pat.cc b/chromium/media/mp2t/ts_section_pat.cc
new file mode 100644
index 00000000000..ef5a21c6f0b
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pat.cc
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/ts_section_pat.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+#include "media/mp2t/mp2t_common.h"
+
+namespace media {
+namespace mp2t {
+
+TsSectionPat::TsSectionPat(const RegisterPmtCb& register_pmt_cb)
+ : register_pmt_cb_(register_pmt_cb),
+ version_number_(-1) {
+}
+
+TsSectionPat::~TsSectionPat() {
+}
+
+bool TsSectionPat::ParsePsiSection(BitReader* bit_reader) {
+ // Read the fixed section length.
+ int table_id;
+ int section_syntax_indicator;
+ int dummy_zero;
+ int reserved;
+ int section_length;
+ int transport_stream_id;
+ int version_number;
+ int current_next_indicator;
+ int section_number;
+ int last_section_number;
+ RCHECK(bit_reader->ReadBits(8, &table_id));
+ RCHECK(bit_reader->ReadBits(1, &section_syntax_indicator));
+ RCHECK(bit_reader->ReadBits(1, &dummy_zero));
+ RCHECK(bit_reader->ReadBits(2, &reserved));
+ RCHECK(bit_reader->ReadBits(12, &section_length));
+ RCHECK(section_length >= 5);
+ RCHECK(section_length <= 1021);
+ RCHECK(bit_reader->ReadBits(16, &transport_stream_id));
+ RCHECK(bit_reader->ReadBits(2, &reserved));
+ RCHECK(bit_reader->ReadBits(5, &version_number));
+ RCHECK(bit_reader->ReadBits(1, &current_next_indicator));
+ RCHECK(bit_reader->ReadBits(8, &section_number));
+ RCHECK(bit_reader->ReadBits(8, &last_section_number));
+ section_length -= 5;
+
+ // Perform a few verifications:
+ // - Table ID should be 0 for a PAT.
+ // - section_syntax_indicator should be one.
+ // - section length should not exceed 1021
+ RCHECK(table_id == 0x0);
+ RCHECK(section_syntax_indicator);
+ RCHECK(!dummy_zero);
+
+ // Both the program table and the CRC have a size multiple of 4.
+ // Note for pmt_pid_count: minus 4 to account for the CRC.
+ RCHECK((section_length % 4) == 0);
+ int pmt_pid_count = (section_length - 4) / 4;
+
+ // Read the variable length section: program table & crc.
+ std::vector<int> program_number_array(pmt_pid_count);
+ std::vector<int> pmt_pid_array(pmt_pid_count);
+ for (int k = 0; k < pmt_pid_count; k++) {
+ int reserved;
+ RCHECK(bit_reader->ReadBits(16, &program_number_array[k]));
+ RCHECK(bit_reader->ReadBits(3, &reserved));
+ RCHECK(bit_reader->ReadBits(13, &pmt_pid_array[k]));
+ }
+ int crc32;
+ RCHECK(bit_reader->ReadBits(32, &crc32));
+
+ // Just ignore the PAT if not applicable yet.
+ if (!current_next_indicator) {
+ DVLOG(1) << "Not supported: received a PAT not applicable yet";
+ return true;
+ }
+
+ // Ignore the program table if it hasn't changed.
+ if (version_number == version_number_)
+ return true;
+
+ // Both the MSE and the HLS spec specifies that TS streams should convey
+ // exactly one program.
+ if (pmt_pid_count > 1) {
+ DVLOG(1) << "Multiple programs detected in the Mpeg2 TS stream";
+ return false;
+ }
+
+ // Can now register the PMT.
+#if !defined(NDEBUG)
+ int expected_version_number = version_number;
+ if (version_number_ >= 0)
+ expected_version_number = (version_number_ + 1) % 32;
+ DVLOG_IF(1, version_number != expected_version_number)
+ << "Unexpected version number: "
+ << version_number << " vs " << version_number_;
+#endif
+ for (int k = 0; k < pmt_pid_count; k++) {
+ if (program_number_array[k] != 0) {
+ // Program numbers different from 0 correspond to PMT.
+ register_pmt_cb_.Run(program_number_array[k], pmt_pid_array[k]);
+ // Even if there are multiple programs, only one can be supported now.
+ // HLS: "Transport Stream segments MUST contain a single MPEG-2 Program."
+ break;
+ }
+ }
+ version_number_ = version_number;
+
+ return true;
+}
+
+void TsSectionPat::ResetPsiSection() {
+ version_number_ = -1;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/ts_section_pat.h b/chromium/media/mp2t/ts_section_pat.h
new file mode 100644
index 00000000000..84f33de7e48
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pat.h
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_SECTION_PAT_H_
+#define MEDIA_MP2T_TS_SECTION_PAT_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "media/mp2t/ts_section_psi.h"
+
+namespace media {
+namespace mp2t {
+
+class TsSectionPat : public TsSectionPsi {
+ public:
+ // RegisterPmtCb::Run(int program_number, int pmt_pid);
+ typedef base::Callback<void(int, int)> RegisterPmtCb;
+
+ explicit TsSectionPat(const RegisterPmtCb& register_pmt_cb);
+ virtual ~TsSectionPat();
+
+ // TsSectionPsi implementation.
+ virtual bool ParsePsiSection(BitReader* bit_reader) OVERRIDE;
+ virtual void ResetPsiSection() OVERRIDE;
+
+ private:
+ RegisterPmtCb register_pmt_cb_;
+
+ // Parameters from the PAT.
+ int version_number_;
+
+ DISALLOW_COPY_AND_ASSIGN(TsSectionPat);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp2t/ts_section_pes.cc b/chromium/media/mp2t/ts_section_pes.cc
new file mode 100644
index 00000000000..ff0beaaf2de
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pes.cc
@@ -0,0 +1,312 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/ts_section_pes.h"
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/base/bit_reader.h"
+#include "media/base/buffers.h"
+#include "media/mp2t/es_parser.h"
+#include "media/mp2t/mp2t_common.h"
+
+static const int kPesStartCode = 0x000001;
+
+// Given that |time| is coded using 33 bits,
+// UnrollTimestamp returns the corresponding unrolled timestamp.
+// The unrolled timestamp is defined by:
+// |time| + k * (2 ^ 33)
+// where k is estimated so that the unrolled timestamp
+// is as close as possible to |previous_unrolled_time|.
+static int64 UnrollTimestamp(int64 previous_unrolled_time, int64 time) {
+ // Mpeg2 TS timestamps have an accuracy of 33 bits.
+ const int nbits = 33;
+
+ // |timestamp| has a precision of |nbits|
+ // so make sure the highest bits are set to 0.
+ DCHECK_EQ((time >> nbits), 0);
+
+ // Consider 3 possibilities to estimate the missing high bits of |time|.
+ int64 previous_unrolled_time_high =
+ (previous_unrolled_time >> nbits);
+ int64 time0 = ((previous_unrolled_time_high - 1) << nbits) | time;
+ int64 time1 = ((previous_unrolled_time_high + 0) << nbits) | time;
+ int64 time2 = ((previous_unrolled_time_high + 1) << nbits) | time;
+
+ // Select the min absolute difference with the current time
+ // so as to ensure time continuity.
+ int64 diff0 = time0 - previous_unrolled_time;
+ int64 diff1 = time1 - previous_unrolled_time;
+ int64 diff2 = time2 - previous_unrolled_time;
+ if (diff0 < 0)
+ diff0 = -diff0;
+ if (diff1 < 0)
+ diff1 = -diff1;
+ if (diff2 < 0)
+ diff2 = -diff2;
+
+ int64 unrolled_time;
+ int64 min_diff;
+ if (diff1 < diff0) {
+ unrolled_time = time1;
+ min_diff = diff1;
+ } else {
+ unrolled_time = time0;
+ min_diff = diff0;
+ }
+ if (diff2 < min_diff)
+ unrolled_time = time2;
+
+ return unrolled_time;
+}
+
+static bool IsTimestampSectionValid(int64 timestamp_section) {
+ // |pts_section| has 40 bits:
+ // - starting with either '0010' or '0011' or '0001'
+ // - and ending with a marker bit.
+ // See ITU H.222 standard - PES section.
+
+ // Verify that all the marker bits are set to one.
+ return ((timestamp_section & 0x1) != 0) &&
+ ((timestamp_section & 0x10000) != 0) &&
+ ((timestamp_section & 0x100000000) != 0);
+}
+
+static int64 ConvertTimestampSectionToTimestamp(int64 timestamp_section) {
+ return (((timestamp_section >> 33) & 0x7) << 30) |
+ (((timestamp_section >> 17) & 0x7fff) << 15) |
+ (((timestamp_section >> 1) & 0x7fff) << 0);
+}
+
+namespace media {
+namespace mp2t {
+
+TsSectionPes::TsSectionPes(scoped_ptr<EsParser> es_parser)
+ : es_parser_(es_parser.release()),
+ wait_for_pusi_(true),
+ previous_pts_valid_(false),
+ previous_pts_(0),
+ previous_dts_valid_(false),
+ previous_dts_(0) {
+ DCHECK(es_parser_);
+}
+
+TsSectionPes::~TsSectionPes() {
+}
+
+bool TsSectionPes::Parse(bool payload_unit_start_indicator,
+ const uint8* buf, int size) {
+ // Ignore partial PES.
+ if (wait_for_pusi_ && !payload_unit_start_indicator)
+ return true;
+
+ bool parse_result = true;
+ if (payload_unit_start_indicator) {
+ // Try emitting a packet since we might have a pending PES packet
+ // with an undefined size.
+ // In this case, a unit is emitted when the next unit is coming.
+ int raw_pes_size;
+ const uint8* raw_pes;
+ pes_byte_queue_.Peek(&raw_pes, &raw_pes_size);
+ if (raw_pes_size > 0)
+ parse_result = Emit(true);
+
+ // Reset the state.
+ ResetPesState();
+
+ // Update the state.
+ wait_for_pusi_ = false;
+ }
+
+ // Add the data to the parser state.
+ if (size > 0)
+ pes_byte_queue_.Push(buf, size);
+
+ // Try emitting the current PES packet.
+ return (parse_result && Emit(false));
+}
+
+void TsSectionPes::Flush() {
+ // Try emitting a packet since we might have a pending PES packet
+ // with an undefined size.
+ Emit(true);
+
+ // Flush the underlying ES parser.
+ es_parser_->Flush();
+}
+
+void TsSectionPes::Reset() {
+ ResetPesState();
+
+ previous_pts_valid_ = false;
+ previous_pts_ = 0;
+ previous_dts_valid_ = false;
+ previous_dts_ = 0;
+
+ es_parser_->Reset();
+}
+
+bool TsSectionPes::Emit(bool emit_for_unknown_size) {
+ int raw_pes_size;
+ const uint8* raw_pes;
+ pes_byte_queue_.Peek(&raw_pes, &raw_pes_size);
+
+ // A PES should be at least 6 bytes.
+ // Wait for more data to come if not enough bytes.
+ if (raw_pes_size < 6)
+ return true;
+
+ // Check whether we have enough data to start parsing.
+ int pes_packet_length =
+ (static_cast<int>(raw_pes[4]) << 8) |
+ (static_cast<int>(raw_pes[5]));
+ if ((pes_packet_length == 0 && !emit_for_unknown_size) ||
+ (pes_packet_length != 0 && raw_pes_size < pes_packet_length + 6)) {
+ // Wait for more data to come either because:
+ // - there are not enough bytes,
+ // - or the PES size is unknown and the "force emit" flag is not set.
+ // (PES size might be unknown for video PES packet).
+ return true;
+ }
+ DVLOG(LOG_LEVEL_PES) << "pes_packet_length=" << pes_packet_length;
+
+ // Parse the packet.
+ bool parse_result = ParseInternal(raw_pes, raw_pes_size);
+
+ // Reset the state.
+ ResetPesState();
+
+ return parse_result;
+}
+
+bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
+ BitReader bit_reader(raw_pes, raw_pes_size);
+
+ // Read up to the pes_packet_length (6 bytes).
+ int packet_start_code_prefix;
+ int stream_id;
+ int pes_packet_length;
+ RCHECK(bit_reader.ReadBits(24, &packet_start_code_prefix));
+ RCHECK(bit_reader.ReadBits(8, &stream_id));
+ RCHECK(bit_reader.ReadBits(16, &pes_packet_length));
+
+ RCHECK(packet_start_code_prefix == kPesStartCode);
+ DVLOG(LOG_LEVEL_PES) << "stream_id=" << std::hex << stream_id << std::dec;
+ if (pes_packet_length == 0)
+ pes_packet_length = bit_reader.bits_available() / 8;
+
+ // Ignore the PES for unknown stream IDs.
+ // See ITU H.222 Table 2-22 "Stream_id assignments"
+ bool is_audio_stream_id = ((stream_id & 0xe0) == 0xc0);
+ bool is_video_stream_id = ((stream_id & 0xf0) == 0xe0);
+ if (!is_audio_stream_id && !is_video_stream_id)
+ return true;
+
+ // Read up to "pes_header_data_length".
+ int dummy_2;
+ int PES_scrambling_control;
+ int PES_priority;
+ int data_alignment_indicator;
+ int copyright;
+ int original_or_copy;
+ int pts_dts_flags;
+ int escr_flag;
+ int es_rate_flag;
+ int dsm_trick_mode_flag;
+ int additional_copy_info_flag;
+ int pes_crc_flag;
+ int pes_extension_flag;
+ int pes_header_data_length;
+ RCHECK(bit_reader.ReadBits(2, &dummy_2));
+ RCHECK(dummy_2 == 0x2);
+ RCHECK(bit_reader.ReadBits(2, &PES_scrambling_control));
+ RCHECK(bit_reader.ReadBits(1, &PES_priority));
+ RCHECK(bit_reader.ReadBits(1, &data_alignment_indicator));
+ RCHECK(bit_reader.ReadBits(1, &copyright));
+ RCHECK(bit_reader.ReadBits(1, &original_or_copy));
+ RCHECK(bit_reader.ReadBits(2, &pts_dts_flags));
+ RCHECK(bit_reader.ReadBits(1, &escr_flag));
+ RCHECK(bit_reader.ReadBits(1, &es_rate_flag));
+ RCHECK(bit_reader.ReadBits(1, &dsm_trick_mode_flag));
+ RCHECK(bit_reader.ReadBits(1, &additional_copy_info_flag));
+ RCHECK(bit_reader.ReadBits(1, &pes_crc_flag));
+ RCHECK(bit_reader.ReadBits(1, &pes_extension_flag));
+ RCHECK(bit_reader.ReadBits(8, &pes_header_data_length));
+ int pes_header_start_size = bit_reader.bits_available() / 8;
+
+ // Compute the size and the offset of the ES payload.
+ // "6" for the 6 bytes read before and including |pes_packet_length|.
+ // "3" for the 3 bytes read before and including |pes_header_data_length|.
+ int es_size = pes_packet_length - 3 - pes_header_data_length;
+ int es_offset = 6 + 3 + pes_header_data_length;
+ RCHECK(es_size >= 0);
+ RCHECK(es_offset + es_size <= raw_pes_size);
+
+ // Read the timing information section.
+ bool is_pts_valid = false;
+ bool is_dts_valid = false;
+ int64 pts_section = 0;
+ int64 dts_section = 0;
+ if (pts_dts_flags == 0x2) {
+ RCHECK(bit_reader.ReadBits(40, &pts_section));
+ RCHECK((((pts_section >> 36) & 0xf) == 0x2) &&
+ IsTimestampSectionValid(pts_section));
+ is_pts_valid = true;
+ }
+ if (pts_dts_flags == 0x3) {
+ RCHECK(bit_reader.ReadBits(40, &pts_section));
+ RCHECK(bit_reader.ReadBits(40, &dts_section));
+ RCHECK((((pts_section >> 36) & 0xf) == 0x3) &&
+ IsTimestampSectionValid(pts_section));
+ RCHECK((((dts_section >> 36) & 0xf) == 0x1) &&
+ IsTimestampSectionValid(dts_section));
+ is_pts_valid = true;
+ is_dts_valid = true;
+ }
+
+ // Convert and unroll the timestamps.
+ base::TimeDelta media_pts(kNoTimestamp());
+ base::TimeDelta media_dts(kNoTimestamp());
+ if (is_pts_valid) {
+ int64 pts = ConvertTimestampSectionToTimestamp(pts_section);
+ if (previous_pts_valid_)
+ pts = UnrollTimestamp(previous_pts_, pts);
+ previous_pts_ = pts;
+ previous_pts_valid_ = true;
+ media_pts = base::TimeDelta::FromMicroseconds((1000 * pts) / 90);
+ }
+ if (is_dts_valid) {
+ int64 dts = ConvertTimestampSectionToTimestamp(dts_section);
+ if (previous_dts_valid_)
+ dts = UnrollTimestamp(previous_dts_, dts);
+ previous_dts_ = dts;
+ previous_dts_valid_ = true;
+ media_dts = base::TimeDelta::FromMicroseconds((1000 * dts) / 90);
+ }
+
+ // Discard the rest of the PES packet header.
+ // TODO(damienv): check if some info of the PES packet header are useful.
+ DCHECK_EQ(bit_reader.bits_available() % 8, 0);
+ int pes_header_remaining_size = pes_header_data_length -
+ (pes_header_start_size - bit_reader.bits_available() / 8);
+ RCHECK(pes_header_remaining_size >= 0);
+
+ // Read the PES packet.
+ DVLOG(LOG_LEVEL_PES)
+ << "Emit a reassembled PES:"
+ << " size=" << es_size
+ << " pts=" << media_pts.InMilliseconds()
+ << " dts=" << media_dts.InMilliseconds()
+ << " data_alignment_indicator=" << data_alignment_indicator;
+ return es_parser_->Parse(&raw_pes[es_offset], es_size, media_pts, media_dts);
+}
+
+void TsSectionPes::ResetPesState() {
+ pes_byte_queue_.Reset();
+ wait_for_pusi_ = true;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/ts_section_pes.h b/chromium/media/mp2t/ts_section_pes.h
new file mode 100644
index 00000000000..b80473a58a5
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pes.h
@@ -0,0 +1,64 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_SECTION_PES_H_
+#define MEDIA_MP2T_TS_SECTION_PES_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/byte_queue.h"
+#include "media/mp2t/ts_section.h"
+
+namespace media {
+namespace mp2t {
+
+class EsParser;
+
+class TsSectionPes : public TsSection {
+ public:
+ explicit TsSectionPes(scoped_ptr<EsParser> es_parser);
+ virtual ~TsSectionPes();
+
+ // TsSection implementation.
+ virtual bool Parse(bool payload_unit_start_indicator,
+ const uint8* buf, int size) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ private:
+ // Emit a reassembled PES packet.
+ // Return true if successful.
+ // |emit_for_unknown_size| is used to force emission for PES packets
+ // whose size is unknown.
+ bool Emit(bool emit_for_unknown_size);
+
+ // Parse a PES packet, return true if successful.
+ bool ParseInternal(const uint8* raw_pes, int raw_pes_size);
+
+ void ResetPesState();
+
+ // Bytes of the current PES.
+ ByteQueue pes_byte_queue_;
+
+ // ES parser.
+ scoped_ptr<EsParser> es_parser_;
+
+ // Do not start parsing before getting a unit start indicator.
+ bool wait_for_pusi_;
+
+ // Used to unroll PTS and DTS.
+ bool previous_pts_valid_;
+ int64 previous_pts_;
+ bool previous_dts_valid_;
+ int64 previous_dts_;
+
+ DISALLOW_COPY_AND_ASSIGN(TsSectionPes);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp2t/ts_section_pmt.cc b/chromium/media/mp2t/ts_section_pmt.cc
new file mode 100644
index 00000000000..f20e79f9863
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pmt.cc
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/ts_section_pmt.h"
+
+#include <map>
+
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+#include "media/mp2t/mp2t_common.h"
+
+namespace media {
+namespace mp2t {
+
+TsSectionPmt::TsSectionPmt(const RegisterPesCb& register_pes_cb)
+ : register_pes_cb_(register_pes_cb) {
+}
+
+TsSectionPmt::~TsSectionPmt() {
+}
+
+bool TsSectionPmt::ParsePsiSection(BitReader* bit_reader) {
+ // Read up to |last_section_number|.
+ int table_id;
+ int section_syntax_indicator;
+ int dummy_zero;
+ int reserved;
+ int section_length;
+ int program_number;
+ int version_number;
+ int current_next_indicator;
+ int section_number;
+ int last_section_number;
+ RCHECK(bit_reader->ReadBits(8, &table_id));
+ RCHECK(bit_reader->ReadBits(1, &section_syntax_indicator));
+ RCHECK(bit_reader->ReadBits(1, &dummy_zero));
+ RCHECK(bit_reader->ReadBits(2, &reserved));
+ RCHECK(bit_reader->ReadBits(12, &section_length));
+ int section_start_marker = bit_reader->bits_available() / 8;
+
+ RCHECK(bit_reader->ReadBits(16, &program_number));
+ RCHECK(bit_reader->ReadBits(2, &reserved));
+ RCHECK(bit_reader->ReadBits(5, &version_number));
+ RCHECK(bit_reader->ReadBits(1, &current_next_indicator));
+ RCHECK(bit_reader->ReadBits(8, &section_number));
+ RCHECK(bit_reader->ReadBits(8, &last_section_number));
+
+ // Perform a few verifications:
+ // - table ID should be 2 for a PMT.
+ // - section_syntax_indicator should be one.
+ // - section length should not exceed 1021.
+ RCHECK(table_id == 0x2);
+ RCHECK(section_syntax_indicator);
+ RCHECK(!dummy_zero);
+ RCHECK(section_length <= 1021);
+ RCHECK(section_number == 0);
+ RCHECK(last_section_number == 0);
+
+ // TODO(damienv):
+ // Verify that there is no mismatch between the program number
+ // and the program number that was provided in a PAT for the current PMT.
+
+ // Read the end of the fixed length section.
+ int pcr_pid;
+ int program_info_length;
+ RCHECK(bit_reader->ReadBits(3, &reserved));
+ RCHECK(bit_reader->ReadBits(13, &pcr_pid));
+ RCHECK(bit_reader->ReadBits(4, &reserved));
+ RCHECK(bit_reader->ReadBits(12, &program_info_length));
+ RCHECK(program_info_length < 1024);
+
+ // Read the program info descriptor.
+ // TODO(damienv): check wether any of the descriptors could be useful.
+ // Defined in section 2.6 of ISO-13818.
+ RCHECK(bit_reader->SkipBits(8 * program_info_length));
+
+ // Read the ES description table.
+ // The end of the PID map if 4 bytes away from the end of the section
+ // (4 bytes = size of the CRC).
+ int pid_map_end_marker = section_start_marker - section_length + 4;
+ std::map<int, int> pid_map;
+ while (bit_reader->bits_available() > 8 * pid_map_end_marker) {
+ int stream_type;
+ int reserved;
+ int pid_es;
+ int es_info_length;
+ RCHECK(bit_reader->ReadBits(8, &stream_type));
+ RCHECK(bit_reader->ReadBits(3, &reserved));
+ RCHECK(bit_reader->ReadBits(13, &pid_es));
+ RCHECK(bit_reader->ReadBits(4, &reserved));
+ RCHECK(bit_reader->ReadBits(12, &es_info_length));
+
+ // Do not register the PID right away.
+ // Wait for the end of the section to be fully parsed
+ // to make sure there is no error.
+ pid_map.insert(std::pair<int, int>(pid_es, stream_type));
+
+ // Read the ES info descriptors.
+ // TODO(damienv): check wether any of the descriptors could be useful.
+ // Defined in section 2.6 of ISO-13818.
+ RCHECK(bit_reader->SkipBits(8 * es_info_length));
+ }
+
+ // Read the CRC.
+ int crc32;
+ RCHECK(bit_reader->ReadBits(32, &crc32));
+
+ // Once the PMT has been proved to be correct, register the PIDs.
+ for (std::map<int, int>::iterator it = pid_map.begin();
+ it != pid_map.end(); ++it)
+ register_pes_cb_.Run(it->first, it->second);
+
+ return true;
+}
+
+void TsSectionPmt::ResetPsiSection() {
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/ts_section_pmt.h b/chromium/media/mp2t/ts_section_pmt.h
new file mode 100644
index 00000000000..ece4d1670bd
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_pmt.h
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_SECTION_PMT_H_
+#define MEDIA_MP2T_TS_SECTION_PMT_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "media/mp2t/ts_section_psi.h"
+
+namespace media {
+namespace mp2t {
+
+class TsSectionPmt : public TsSectionPsi {
+ public:
+ // RegisterPesCb::Run(int pes_pid, int stream_type);
+ // Stream type is defined in
+ // "Table 2-34 – Stream type assignments" in H.222
+ // TODO(damienv): add the program number.
+ typedef base::Callback<void(int, int)> RegisterPesCb;
+
+ explicit TsSectionPmt(const RegisterPesCb& register_pes_cb);
+ virtual ~TsSectionPmt();
+
+ // Mpeg2TsPsiParser implementation.
+ virtual bool ParsePsiSection(BitReader* bit_reader) OVERRIDE;
+ virtual void ResetPsiSection() OVERRIDE;
+
+ private:
+ RegisterPesCb register_pes_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(TsSectionPmt);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp2t/ts_section_psi.cc b/chromium/media/mp2t/ts_section_psi.cc
new file mode 100644
index 00000000000..f8a6fc310ce
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_psi.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp2t/ts_section_psi.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+#include "media/mp2t/mp2t_common.h"
+
+// Returns true if the PSI section in |buf| (CRC32 included as its last
+// 4 bytes) passes the MPEG-2 CRC check. Implements a bit-at-a-time,
+// MSB-first CRC-32 with polynomial 0x04c11db7 and initial value
+// 0xffffffff; running it over data + embedded CRC leaves a residual of 0
+// for a valid section.
+static bool IsCrcValid(const uint8* buf, int size) {
+  uint32 crc = 0xffffffffu;
+  const uint32 kCrcPoly = 0x4c11db7;
+
+  for (int k = 0; k < size; k++) {
+    int nbits = 8;
+    uint32 data_msb_aligned = buf[k];
+    data_msb_aligned <<= (32 - nbits);
+
+    while (nbits > 0) {
+      // Shift the CRC left, XOR-ing in the polynomial whenever the bit
+      // shifted out differs from the next message bit.
+      if ((data_msb_aligned ^ crc) & 0x80000000) {
+        crc <<= 1;
+        crc ^= kCrcPoly;
+      } else {
+        crc <<= 1;
+      }
+
+      data_msb_aligned <<= 1;
+      nbits--;
+    }
+  }
+
+  // Valid data + appended CRC32 checksums to zero.
+  return (crc == 0);
+}
+
+namespace media {
+namespace mp2t {
+
+TsSectionPsi::TsSectionPsi()
+ : wait_for_pusi_(true),
+ leading_bytes_to_discard_(0) {
+}
+
+TsSectionPsi::~TsSectionPsi() {
+}
+
+bool TsSectionPsi::Parse(bool payload_unit_start_indicator,
+ const uint8* buf, int size) {
+ // Ignore partial PSI.
+ if (wait_for_pusi_ && !payload_unit_start_indicator)
+ return true;
+
+ if (payload_unit_start_indicator) {
+ // Reset the state of the PSI section.
+ ResetPsiState();
+
+ // Update the state.
+ wait_for_pusi_ = false;
+ DCHECK_GE(size, 1);
+ int pointer_field = buf[0];
+ leading_bytes_to_discard_ = pointer_field;
+ buf++;
+ size--;
+ }
+
+ // Discard some leading bytes if needed.
+ if (leading_bytes_to_discard_ > 0) {
+ int nbytes_to_discard = std::min(leading_bytes_to_discard_, size);
+ buf += nbytes_to_discard;
+ size -= nbytes_to_discard;
+ leading_bytes_to_discard_ -= nbytes_to_discard;
+ }
+ if (size == 0)
+ return true;
+
+ // Add the data to the parser state.
+ psi_byte_queue_.Push(buf, size);
+ int raw_psi_size;
+ const uint8* raw_psi;
+ psi_byte_queue_.Peek(&raw_psi, &raw_psi_size);
+
+ // Check whether we have enough data to start parsing.
+ if (raw_psi_size < 3)
+ return true;
+ int section_length =
+ ((static_cast<int>(raw_psi[1]) << 8) |
+ (static_cast<int>(raw_psi[2]))) & 0xfff;
+ if (section_length >= 1021)
+ return false;
+ int psi_length = section_length + 3;
+ if (raw_psi_size < psi_length) {
+ // Don't throw an error when there is not enough data,
+ // just wait for more data to come.
+ return true;
+ }
+
+ // There should not be any trailing bytes after a PMT.
+ // Instead, the pointer field should be used to stuff bytes.
+ DVLOG_IF(1, raw_psi_size > psi_length)
+ << "Trailing bytes after a PSI section: "
+ << psi_length << " vs " << raw_psi_size;
+
+ // Verify the CRC.
+ RCHECK(IsCrcValid(raw_psi, psi_length));
+
+ // Parse the PSI section.
+ BitReader bit_reader(raw_psi, raw_psi_size);
+ bool status = ParsePsiSection(&bit_reader);
+ if (status)
+ ResetPsiState();
+
+ return status;
+}
+
+void TsSectionPsi::Flush() {
+}
+
+void TsSectionPsi::Reset() {
+ ResetPsiSection();
+ ResetPsiState();
+}
+
+void TsSectionPsi::ResetPsiState() {
+ wait_for_pusi_ = true;
+ psi_byte_queue_.Reset();
+ leading_bytes_to_discard_ = 0;
+}
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/ts_section_psi.h b/chromium/media/mp2t/ts_section_psi.h
new file mode 100644
index 00000000000..a63144633cc
--- /dev/null
+++ b/chromium/media/mp2t/ts_section_psi.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP2T_TS_SECTION_PSI_H_
+#define MEDIA_MP2T_TS_SECTION_PSI_H_
+
+#include "base/compiler_specific.h"
+#include "media/base/byte_queue.h"
+#include "media/mp2t/ts_section.h"
+
+namespace media {
+
+class BitReader;
+
+namespace mp2t {
+
+class TsSectionPsi : public TsSection {
+ public:
+ TsSectionPsi();
+ virtual ~TsSectionPsi();
+
+ // TsSection implementation.
+ virtual bool Parse(bool payload_unit_start_indicator,
+ const uint8* buf, int size) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ // Parse the content of the PSI section.
+ virtual bool ParsePsiSection(BitReader* bit_reader) = 0;
+
+ // Reset the state of the PSI section.
+ virtual void ResetPsiSection() = 0;
+
+ private:
+ void ResetPsiState();
+
+ // Bytes of the current PSI.
+ ByteQueue psi_byte_queue_;
+
+ // Do not start parsing before getting a unit start indicator.
+ bool wait_for_pusi_;
+
+ // Number of leading bytes to discard (pointer field).
+ int leading_bytes_to_discard_;
+
+ DISALLOW_COPY_AND_ASSIGN(TsSectionPsi);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif
+
diff --git a/chromium/media/mp3/mp3_stream_parser.cc b/chromium/media/mp3/mp3_stream_parser.cc
new file mode 100644
index 00000000000..0688d99fcc9
--- /dev/null
+++ b/chromium/media/mp3/mp3_stream_parser.cc
@@ -0,0 +1,598 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mp3/mp3_stream_parser.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/bit_reader.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_decoder_config.h"
+#include "net/http/http_util.h"
+
+namespace media {
+
+static const uint32 kMP3StartCodeMask = 0xffe00000;
+static const uint32 kICYStartCode = 0x49435920; // 'ICY '
+
+// Arbitrary upper bound on the size of an IceCast header before it
+// triggers an error.
+static const int kMaxIcecastHeaderSize = 4096;
+
+static const uint32 kID3StartCodeMask = 0xffffff00;
+static const uint32 kID3v1StartCode = 0x54414700; // 'TAG\0'
+static const int kID3v1Size = 128;
+static const int kID3v1ExtendedSize = 227;
+static const uint32 kID3v2StartCode = 0x49443300; // 'ID3\0'
+
+// Map that determines which bitrate_index & channel_mode combinations
+// are allowed.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const bool kIsAllowed[17][4] = {
+ { true, true, true, true }, // free
+ { true, false, false, false }, // 32
+ { true, false, false, false }, // 48
+ { true, false, false, false }, // 56
+ { true, true, true, true }, // 64
+ { true, false, false, false }, // 80
+ { true, true, true, true }, // 96
+ { true, true, true, true }, // 112
+ { true, true, true, true }, // 128
+ { true, true, true, true }, // 160
+ { true, true, true, true }, // 192
+ { false, true, true, true }, // 224
+ { false, true, true, true }, // 256
+ { false, true, true, true }, // 320
+ { false, true, true, true }, // 384
+ { false, false, false, false } // bad
+};
+
+// Maps version and layer information in the frame header
+// into an index for the |kBitrateMap|.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kVersionLayerMap[4][4] = {
+ // { reserved, L3, L2, L1 }
+ { 5, 4, 4, 3 }, // MPEG 2.5
+ { 5, 5, 5, 5 }, // reserved
+ { 5, 4, 4, 3 }, // MPEG 2
+ { 5, 2, 1, 0 } // MPEG 1
+};
+
+// Maps the bitrate index field in the header and an index
+// from |kVersionLayerMap| to a frame bitrate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kBitrateMap[16][6] = {
+ // { V1L1, V1L2, V1L3, V2L1, V2L2 & V2L3, reserved }
+ { 0, 0, 0, 0, 0, 0 },
+ { 32, 32, 32, 32, 8, 0 },
+ { 64, 48, 40, 48, 16, 0 },
+ { 96, 56, 48, 56, 24, 0 },
+ { 128, 64, 56, 64, 32, 0 },
+ { 160, 80, 64, 80, 40, 0 },
+ { 192, 96, 80, 96, 48, 0 },
+ { 224, 112, 96, 112, 56, 0 },
+ { 256, 128, 112, 128, 64, 0 },
+ { 288, 160, 128, 144, 80, 0 },
+ { 320, 192, 160, 160, 96, 0 },
+ { 352, 224, 192, 176, 112, 0 },
+ { 384, 256, 224, 192, 128, 0 },
+ { 416, 320, 256, 224, 144, 0 },
+ { 448, 384, 320, 256, 160, 0 },
+ { 0, 0, 0, 0, 0}
+};
+
+// Maps the sample rate index and version fields from the frame header
+// to a sample rate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kSampleRateMap[4][4] = {
+ // { V2.5, reserved, V2, V1 }
+ { 11025, 0, 22050, 44100 },
+ { 12000, 0, 24000, 48000 },
+ { 8000, 0, 16000, 32000 },
+ { 0, 0, 0, 0 }
+};
+
+// Frame header field constants.
+static const int kVersion1 = 3;
+static const int kVersion2 = 2;
+static const int kVersionReserved = 1;
+static const int kVersion2_5 = 0;
+static const int kLayerReserved = 0;
+static const int kLayer1 = 3;
+static const int kLayer2 = 2;
+static const int kLayer3 = 1;
+static const int kBitrateFree = 0;
+static const int kBitrateBad = 0xf;
+static const int kSampleRateReserved = 3;
+
+MP3StreamParser::MP3StreamParser()
+ : state_(UNINITIALIZED),
+ in_media_segment_(false) {
+}
+
+MP3StreamParser::~MP3StreamParser() {}
+
+void MP3StreamParser::Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, UNINITIALIZED);
+ init_cb_ = init_cb;
+ config_cb_ = config_cb;
+ new_buffers_cb_ = new_buffers_cb;
+ new_segment_cb_ = new_segment_cb;
+ end_of_segment_cb_ = end_of_segment_cb;
+ log_cb_ = log_cb;
+
+ ChangeState(INITIALIZED);
+}
+
+// Discards all queued, unparsed bytes and resets the timestamp base so the
+// next parsed frame starts a fresh timeline.
+void MP3StreamParser::Flush() {
+  DVLOG(1) << __FUNCTION__;
+  DCHECK_NE(state_, UNINITIALIZED);
+  queue_.Reset();
+  // |timestamp_helper_| is only created in ParseMP3Frame() once the first
+  // valid frame establishes a sample rate; guard against a Flush() issued
+  // before any frame has been parsed.
+  if (timestamp_helper_)
+    timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
+  in_media_segment_ = false;
+}
+
+// Appends |buf| to the internal queue and incrementally parses MP3 frames,
+// Icecast headers and ID3v1/ID3v2 tags from it. Fully-parsed audio frames
+// are batched and emitted via |new_buffers_cb_|. Returns false on a fatal
+// parse error (the parser then stays in PARSE_ERROR), true otherwise.
+bool MP3StreamParser::Parse(const uint8* buf, int size) {
+  DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+  DCHECK(buf);
+  DCHECK_GT(size, 0);
+  DCHECK_NE(state_, UNINITIALIZED);
+
+  if (state_ == PARSE_ERROR)
+    return false;
+
+  DCHECK_EQ(state_, INITIALIZED);
+
+  queue_.Push(buf, size);
+
+  bool end_of_segment = true;
+  BufferQueue buffers;
+  for (;;) {
+    const uint8* data;
+    int data_size;
+    queue_.Peek(&data, &data_size);
+
+    // Check |data_size| (bytes actually available in the queue), not |size|
+    // (bytes in this append only): reading data[0..3] below with fewer than
+    // 4 queued bytes would be an out-of-bounds read.
+    if (data_size < 4)
+      break;
+
+    uint32 start_code = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
+    int bytes_read = 0;
+    bool parsed_metadata = true;
+    if ((start_code & kMP3StartCodeMask) == kMP3StartCodeMask) {
+      bytes_read = ParseMP3Frame(data, data_size, &buffers);
+
+      // Only allow the current segment to end if a full frame has been parsed.
+      end_of_segment = bytes_read > 0;
+      parsed_metadata = false;
+    } else if (start_code == kICYStartCode) {
+      bytes_read = ParseIcecastHeader(data, data_size);
+    } else if ((start_code & kID3StartCodeMask) == kID3v1StartCode) {
+      bytes_read = ParseID3v1(data, data_size);
+    } else if ((start_code & kID3StartCodeMask) == kID3v2StartCode) {
+      bytes_read = ParseID3v2(data, data_size);
+    } else {
+      bytes_read = FindNextValidStartCode(data, data_size);
+
+      if (bytes_read > 0) {
+        DVLOG(1) << "Unexpected start code 0x" << std::hex << start_code;
+        DVLOG(1) << "SKIPPING " << bytes_read << " bytes of garbage.";
+      }
+    }
+
+    CHECK_LE(bytes_read, data_size);
+
+    if (bytes_read < 0) {
+      ChangeState(PARSE_ERROR);
+      return false;
+    } else if (bytes_read == 0) {
+      // Need more data.
+      break;
+    }
+
+    // Send pending buffers if we have encountered metadata.
+    if (parsed_metadata && !buffers.empty() && !SendBuffers(&buffers, true))
+      return false;
+
+    queue_.Pop(bytes_read);
+    end_of_segment = true;
+  }
+
+  if (buffers.empty())
+    return true;
+
+  // Send buffers collected in this append that haven't been sent yet.
+  return SendBuffers(&buffers, end_of_segment);
+}
+
+void MP3StreamParser::ChangeState(State state) {
+ DVLOG(1) << __FUNCTION__ << "() : " << state_ << " -> " << state;
+ state_ = state;
+}
+
+// Parses a 4-byte MPEG audio frame header at |data|.
+// |frame_size| (required) receives the full frame size in bytes.
+// |sample_rate|, |channel_layout| and |sample_count| are optional outputs.
+// Returns 4 (header bytes consumed) on success, 0 if more data is needed,
+// and -1 if the header is invalid.
+int MP3StreamParser::ParseFrameHeader(const uint8* data, int size,
+                                      int* frame_size,
+                                      int* sample_rate,
+                                      ChannelLayout* channel_layout,
+                                      int* sample_count) const {
+  DCHECK(data);
+  DCHECK_GE(size, 0);
+  DCHECK(frame_size);
+
+  if (size < 4)
+    return 0;
+
+  BitReader reader(data, size);
+  int sync;
+  int version;
+  int layer;
+  int is_protected;
+  int bitrate_index;
+  int sample_rate_index;
+  int has_padding;
+  int is_private;
+  int channel_mode;
+  int other_flags;
+
+  if (!reader.ReadBits(11, &sync) ||
+      !reader.ReadBits(2, &version) ||
+      !reader.ReadBits(2, &layer) ||
+      !reader.ReadBits(1, &is_protected) ||
+      !reader.ReadBits(4, &bitrate_index) ||
+      !reader.ReadBits(2, &sample_rate_index) ||
+      !reader.ReadBits(1, &has_padding) ||
+      !reader.ReadBits(1, &is_private) ||
+      !reader.ReadBits(2, &channel_mode) ||
+      !reader.ReadBits(6, &other_flags)) {
+    return -1;
+  }
+
+  DVLOG(2) << "Header data :" << std::hex
+           << " sync 0x" << sync
+           << " version 0x" << version
+           << " layer 0x" << layer
+           << " bitrate_index 0x" << bitrate_index
+           << " sample_rate_index 0x" << sample_rate_index
+           << " channel_mode 0x" << channel_mode;
+
+  if (sync != 0x7ff ||
+      version == kVersionReserved ||
+      layer == kLayerReserved ||
+      bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
+      sample_rate_index == kSampleRateReserved) {
+    MEDIA_LOG(log_cb_) << "Invalid header data :" << std::hex
+                       << " sync 0x" << sync
+                       << " version 0x" << version
+                       << " layer 0x" << layer
+                       << " bitrate_index 0x" << bitrate_index
+                       << " sample_rate_index 0x" << sample_rate_index
+                       << " channel_mode 0x" << channel_mode;
+    return -1;
+  }
+
+  // Layer II restricts which (bitrate, channel_mode) combinations are legal;
+  // reject the combination when the |kIsAllowed| table says it is NOT
+  // allowed. (The previous check was inverted and rejected valid headers.)
+  if (layer == kLayer2 && !kIsAllowed[bitrate_index][channel_mode]) {
+    MEDIA_LOG(log_cb_) << "Invalid (bitrate_index, channel_mode) combination :"
+                       << std::hex
+                       << " bitrate_index " << bitrate_index
+                       << " channel_mode " << channel_mode;
+    return -1;
+  }
+
+  int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
+
+  if (bitrate == 0) {
+    MEDIA_LOG(log_cb_) << "Invalid bitrate :" << std::hex
+                       << " version " << version
+                       << " layer " << layer
+                       << " bitrate_index " << bitrate_index;
+    return -1;
+  }
+
+  DVLOG(2) << " bitrate " << bitrate;
+
+  int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
+  if (frame_sample_rate == 0) {
+    MEDIA_LOG(log_cb_) << "Invalid sample rate :" << std::hex
+                       << " version " << version
+                       << " sample_rate_index " << sample_rate_index;
+    return -1;
+  }
+
+  if (sample_rate)
+    *sample_rate = frame_sample_rate;
+
+  // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+  // Table 2.1.5
+  int samples_per_frame;
+  switch (layer) {
+    case kLayer1:
+      samples_per_frame = 384;
+      break;
+
+    case kLayer2:
+      samples_per_frame = 1152;
+      break;
+
+    case kLayer3:
+      if (version == kVersion2 || version == kVersion2_5)
+        samples_per_frame = 576;
+      else
+        samples_per_frame = 1152;
+      break;
+
+    default:
+      return -1;
+  }
+
+  if (sample_count)
+    *sample_count = samples_per_frame;
+
+  // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+  // Text just below Table 2.1.5.
+  if (layer == kLayer1) {
+    // This formulation is a slight variation on the equation below,
+    // but has slightly different truncation characteristics to deal
+    // with the fact that Layer 1 has 4 byte "slots" instead of single
+    // byte ones.
+    *frame_size = 4 * (12 * bitrate * 1000 / frame_sample_rate);
+  } else {
+    *frame_size =
+        ((samples_per_frame / 8) * bitrate * 1000) / frame_sample_rate;
+  }
+
+  if (has_padding)
+    *frame_size += (layer == kLayer1) ? 4 : 1;
+
+  if (channel_layout) {
+    // Map Stereo(0), Joint Stereo(1), and Dual Channel (2) to
+    // CHANNEL_LAYOUT_STEREO and Single Channel (3) to CHANNEL_LAYOUT_MONO.
+    *channel_layout =
+        (channel_mode == 3) ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+  }
+
+  return 4;
+}
+
+int MP3StreamParser::ParseMP3Frame(const uint8* data,
+ int size,
+ BufferQueue* buffers) {
+ DVLOG(2) << __FUNCTION__ << "(" << size << ")";
+
+ int sample_rate;
+ ChannelLayout channel_layout;
+ int frame_size;
+ int sample_count;
+ int bytes_read = ParseFrameHeader(
+ data, size, &frame_size, &sample_rate, &channel_layout, &sample_count);
+
+ if (bytes_read <= 0)
+ return bytes_read;
+
+ // Make sure data contains the entire frame.
+ if (size < frame_size)
+ return 0;
+
+ DVLOG(2) << " sample_rate " << sample_rate
+ << " channel_layout " << channel_layout
+ << " frame_size " << frame_size;
+
+ if (config_.IsValidConfig() &&
+ (config_.samples_per_second() != sample_rate ||
+ config_.channel_layout() != channel_layout)) {
+ // Clear config data so that a config change is initiated.
+ config_ = AudioDecoderConfig();
+
+ // Send all buffers associated with the previous config.
+ if (!buffers->empty() && !SendBuffers(buffers, true))
+ return -1;
+ }
+
+ if (!config_.IsValidConfig()) {
+ config_.Initialize(kCodecMP3, kSampleFormatF32, channel_layout,
+ sample_rate, NULL, 0, false, false,
+ base::TimeDelta(), base::TimeDelta());
+
+ base::TimeDelta base_timestamp;
+ if (timestamp_helper_)
+ base_timestamp = timestamp_helper_->GetTimestamp();
+
+ timestamp_helper_.reset(new AudioTimestampHelper(sample_rate));
+ timestamp_helper_->SetBaseTimestamp(base_timestamp);
+
+ VideoDecoderConfig video_config;
+ bool success = config_cb_.Run(config_, video_config);
+
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(success, kInfiniteDuration());
+
+ if (!success)
+ return -1;
+ }
+
+ scoped_refptr<StreamParserBuffer> buffer =
+ StreamParserBuffer::CopyFrom(data, frame_size, true);
+ buffer->set_timestamp(timestamp_helper_->GetTimestamp());
+ buffer->set_duration(timestamp_helper_->GetFrameDuration(sample_count));
+ buffers->push_back(buffer);
+
+ timestamp_helper_->AddFrames(sample_count);
+
+ return frame_size;
+}
+
+int MP3StreamParser::ParseIcecastHeader(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 4)
+ return 0;
+
+ if (memcmp("ICY ", data, 4))
+ return -1;
+
+ int locate_size = std::min(size, kMaxIcecastHeaderSize);
+ int offset = net::HttpUtil::LocateEndOfHeaders(
+ reinterpret_cast<const char*>(data), locate_size, 4);
+ if (offset < 0) {
+ if (locate_size == kMaxIcecastHeaderSize) {
+ MEDIA_LOG(log_cb_) << "Icecast header is too large.";
+ return -1;
+ }
+
+ return 0;
+ }
+
+ return offset;
+}
+
+int MP3StreamParser::ParseID3v1(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < kID3v1Size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v1 data and
+ // expose it as a metadata text track.
+ return !memcmp(data, "TAG+", 4) ? kID3v1ExtendedSize : kID3v1Size;
+}
+
+int MP3StreamParser::ParseID3v2(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 10)
+ return 0;
+
+ BitReader reader(data, size);
+ int32 id;
+ int version;
+ uint8 flags;
+ int32 id3_size;
+
+ if (!reader.ReadBits(24, &id) ||
+ !reader.ReadBits(16, &version) ||
+ !reader.ReadBits(8, &flags) ||
+ !ParseSyncSafeInt(&reader, &id3_size)) {
+ return -1;
+ }
+
+ int32 actual_tag_size = 10 + id3_size;
+
+ // Increment size if 'Footer present' flag is set.
+ if (flags & 0x10)
+ actual_tag_size += 10;
+
+ // Make sure we have the entire tag.
+ if (size < actual_tag_size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v2 data and
+ // expose it as a metadata text track.
+ return actual_tag_size;
+}
+
+bool MP3StreamParser::ParseSyncSafeInt(BitReader* reader, int32* value) {
+ *value = 0;
+ for (int i = 0; i < 4; ++i) {
+ uint8 tmp;
+ if (!reader->ReadBits(1, &tmp) || tmp != 0) {
+ MEDIA_LOG(log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
+ return false;
+ }
+
+ if (!reader->ReadBits(7, &tmp))
+ return false;
+
+ *value <<= 7;
+ *value += tmp;
+ }
+
+ return true;
+}
+
+int MP3StreamParser::FindNextValidStartCode(const uint8* data, int size) const {
+ const uint8* start = data;
+ const uint8* end = data + size;
+
+ while (start < end) {
+ int bytes_left = end - start;
+ const uint8* candidate_start_code =
+ static_cast<const uint8*>(memchr(start, 0xff, bytes_left));
+
+ if (!candidate_start_code)
+ return 0;
+
+ bool parse_header_failed = false;
+ const uint8* sync = candidate_start_code;
+ // Try to find 3 valid frames in a row. 3 was selected to decrease
+ // the probability of false positives.
+ for (int i = 0; i < 3; ++i) {
+ int sync_size = end - sync;
+ int frame_size;
+ int sync_bytes = ParseFrameHeader(
+ sync, sync_size, &frame_size, NULL, NULL, NULL);
+
+ if (sync_bytes == 0)
+ return 0;
+
+ if (sync_bytes > 0) {
+ DCHECK_LT(sync_bytes, sync_size);
+
+ // Skip over this frame so we can check the next one.
+ sync += frame_size;
+
+ // Make sure the next frame starts inside the buffer.
+ if (sync >= end)
+ return 0;
+ } else {
+ DVLOG(1) << "ParseFrameHeader() " << i << " failed @" << (sync - data);
+ parse_header_failed = true;
+ break;
+ }
+ }
+
+ if (parse_header_failed) {
+ // One of the frame header parses failed so |candidate_start_code|
+ // did not point to the start of a real frame. Move |start| forward
+ // so we can find the next candidate.
+ start = candidate_start_code + 1;
+ continue;
+ }
+
+ return candidate_start_code - data;
+ }
+
+ return 0;
+}
+
+bool MP3StreamParser::SendBuffers(BufferQueue* buffers, bool end_of_segment) {
+ DCHECK(!buffers->empty());
+
+ if (!in_media_segment_) {
+ in_media_segment_ = true;
+ new_segment_cb_.Run();
+ }
+
+ BufferQueue empty_video_buffers;
+ if (!new_buffers_cb_.Run(*buffers, empty_video_buffers))
+ return false;
+ buffers->clear();
+
+ if (end_of_segment) {
+ in_media_segment_ = false;
+ end_of_segment_cb_.Run();
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/mp3/mp3_stream_parser.h b/chromium/media/mp3/mp3_stream_parser.h
new file mode 100644
index 00000000000..97730ae6e82
--- /dev/null
+++ b/chromium/media/mp3/mp3_stream_parser.h
@@ -0,0 +1,127 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MP3_MP3_STREAM_PARSER_H_
+#define MEDIA_MP3_MP3_STREAM_PARSER_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/byte_queue.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+
+namespace media {
+
+class BitReader;
+
+class MEDIA_EXPORT MP3StreamParser : public StreamParser {
+ public:
+ MP3StreamParser();
+ virtual ~MP3StreamParser();
+
+ // StreamParser implementation.
+ virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual bool Parse(const uint8* buf, int size) OVERRIDE;
+
+ private:
+ enum State {
+ UNINITIALIZED,
+ INITIALIZED,
+ PARSE_ERROR
+ };
+
+ State state_;
+
+ InitCB init_cb_;
+ NewConfigCB config_cb_;
+ NewBuffersCB new_buffers_cb_;
+ NewMediaSegmentCB new_segment_cb_;
+ base::Closure end_of_segment_cb_;
+ LogCB log_cb_;
+
+ ByteQueue queue_;
+
+ AudioDecoderConfig config_;
+ scoped_ptr<AudioTimestampHelper> timestamp_helper_;
+ bool in_media_segment_;
+
+ void ChangeState(State state);
+
+ // Parsing functions for various byte stream elements.
+ // |data| & |size| describe the data available for parsing.
+ // These functions are expected to consume an entire frame/header.
+ // It should only return a value greater than 0 when |data| has
+ // enough bytes to successfully parse & consume the entire element.
+ //
+ // |frame_size| - Required parameter that is set to the size of the frame, in
+ // bytes, including the frame header if the function returns a value > 0.
+ // |sample_rate| - Optional parameter that is set to the sample rate
+ // of the frame if this function returns a value > 0.
+ // |channel_layout| - Optional parameter that is set to the channel_layout
+ // of the frame if this function returns a value > 0.
+ // |sample_count| - Optional parameter that is set to the number of samples
+ // in the frame if this function returns a value > 0.
+ //
+ // |sample_rate|, |channel_layout|, |sample_count| may be NULL if the caller
+ // is not interested in receiving these values from the frame header.
+ //
+ // Returns:
+ // > 0 : The number of bytes parsed.
+ // 0 : If more data is needed to parse the entire element.
+ // < 0 : An error was encountered during parsing.
+ int ParseFrameHeader(const uint8* data, int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count) const;
+ int ParseMP3Frame(const uint8* data, int size, BufferQueue* buffers);
+ int ParseIcecastHeader(const uint8* data, int size);
+ int ParseID3v1(const uint8* data, int size);
+ int ParseID3v2(const uint8* data, int size);
+
+ // Parses an ID3v2 "sync safe" integer.
+ // |reader| - A BitReader to read from.
+ // |value| - Set to the integer value read, if true is returned.
+ //
+ // Returns true if the integer was successfully parsed and |value|
+ // was set.
+ // Returns false if an error was encountered. The state of |value| is
+ // undefined when false is returned.
+ bool ParseSyncSafeInt(BitReader* reader, int32* value);
+
+ // Scans |data| for the next valid start code.
+ // Returns:
+ // > 0 : The number of bytes that should be skipped to reach the
+  //         next start code.
+ // 0 : If a valid start code was not found and more data is needed.
+ // < 0 : An error was encountered during parsing.
+ int FindNextValidStartCode(const uint8* data, int size) const;
+
+ // Sends the buffers in |buffers| to |new_buffers_cb_| and then clears
+ // |buffers|.
+ // If |end_of_segment| is set to true, then |end_of_segment_cb_| is called
+ // after |new_buffers_cb_| to signal that these buffers represent the end of a
+ // media segment.
+ // Returns true if the buffers are sent successfully.
+ bool SendBuffers(BufferQueue* buffers, bool end_of_segment);
+
+ DISALLOW_COPY_AND_ASSIGN(MP3StreamParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_MP3_MP3_STREAM_PARSER_H_
diff --git a/chromium/media/mp3/mp3_stream_parser_unittest.cc b/chromium/media/mp3/mp3_stream_parser_unittest.cc
new file mode 100644
index 00000000000..9d309544af6
--- /dev/null
+++ b/chromium/media/mp3/mp3_stream_parser_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/test_data_util.h"
+#include "media/base/video_decoder_config.h"
+#include "media/mp3/mp3_stream_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class MP3StreamParserTest : public testing::Test {
+ public:
+ MP3StreamParserTest() {}
+
+ protected:
+ MP3StreamParser parser_;
+ std::stringstream results_stream_;
+
+ bool AppendData(const uint8* data, size_t length) {
+ return parser_.Parse(data, length);
+ }
+
+ bool AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
+ const uint8* start = data;
+ const uint8* end = data + length;
+ while (start < end) {
+ size_t append_size =
+ std::min(piece_size, static_cast<size_t>(end - start));
+ if (!AppendData(start, append_size))
+ return false;
+ start += append_size;
+ }
+ return true;
+ }
+
+ void OnInitDone(bool success, base::TimeDelta duration) {
+ DVLOG(1) << __FUNCTION__ << "(" << success << ", "
+ << duration.InMilliseconds() << ")";
+ }
+
+ bool OnNewConfig(const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config) {
+ DVLOG(1) << __FUNCTION__ << "(" << audio_config.IsValidConfig() << ", "
+ << video_config.IsValidConfig() << ")";
+ EXPECT_TRUE(audio_config.IsValidConfig());
+ EXPECT_FALSE(video_config.IsValidConfig());
+ return true;
+ }
+
+ std::string BufferQueueToString(const StreamParser::BufferQueue& buffers) {
+ std::stringstream ss;
+
+ ss << "{";
+ for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
+ itr != buffers.end();
+ ++itr) {
+ ss << " " << (*itr)->timestamp().InMilliseconds();
+ if ((*itr)->IsKeyframe())
+ ss << "K";
+ }
+ ss << " }";
+
+ return ss.str();
+ }
+
+ bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers) {
+ EXPECT_FALSE(audio_buffers.empty());
+ EXPECT_TRUE(video_buffers.empty());
+
+ std::string buffers_str = BufferQueueToString(audio_buffers);
+ DVLOG(1) << __FUNCTION__ << " : " << buffers_str;
+ results_stream_ << buffers_str;
+ return true;
+ }
+
+ bool OnNewTextBuffers(TextTrack* text_track,
+ const StreamParser::BufferQueue& buffers) {
+ return true;
+ }
+
+ void OnKeyNeeded(const std::string& type,
+ const std::vector<uint8>& init_data) {
+ DVLOG(1) << __FUNCTION__ << "(" << type << ", " << init_data.size() << ")";
+ }
+
+ scoped_ptr<TextTrack> OnAddTextTrack(TextKind kind,
+ const std::string& label,
+ const std::string& language) {
+ return scoped_ptr<TextTrack>();
+ }
+
+ void OnNewSegment() {
+ DVLOG(1) << __FUNCTION__;
+ results_stream_ << "NewSegment";
+ }
+
+ void OnEndOfSegment() {
+ DVLOG(1) << __FUNCTION__;
+ results_stream_ << "EndOfSegment";
+ }
+
+ void InitializeParser() {
+ parser_.Init(
+ base::Bind(&MP3StreamParserTest::OnInitDone, base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnNewConfig, base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnNewBuffers, base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnNewTextBuffers,
+ base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnKeyNeeded, base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnAddTextTrack,
+ base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnNewSegment, base::Unretained(this)),
+ base::Bind(&MP3StreamParserTest::OnEndOfSegment,
+ base::Unretained(this)),
+ LogCB());
+ }
+
+ std::string ParseFile(const std::string& filename, int append_bytes) {
+ results_stream_.clear();
+ InitializeParser();
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
+ EXPECT_TRUE(
+ AppendDataInPieces(buffer->data(), buffer->data_size(), append_bytes));
+ return results_stream_.str();
+ }
+};
+
+// Test parsing with small prime sized chunks to smoke out "power of
+// 2" field size assumptions.
+TEST_F(MP3StreamParserTest, UnalignedAppend) {
+ std::string expected =
+ "NewSegment"
+ "{ 0K }"
+ "{ 26K }"
+ "{ 52K }"
+ "{ 78K }"
+ "{ 104K }"
+ "{ 130K }"
+ "{ 156K }"
+ "{ 182K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 208K }"
+ "{ 235K }"
+ "{ 261K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 287K }"
+ "{ 313K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.mp3", 17));
+}
+
+// Test parsing with a larger piece size to verify that multiple buffers
+// are passed to |new_buffer_cb_|.
+TEST_F(MP3StreamParserTest, UnalignedAppend512) {
+ std::string expected =
+ "NewSegment"
+ "{ 0K }"
+ "{ 26K 52K 78K 104K }"
+ "{ 130K 156K 182K }"
+ "{ 208K 235K 261K 287K }"
+ "{ 313K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.mp3", 512));
+}
+
+} // namespace media
diff --git a/chromium/media/mp4/cenc.cc b/chromium/media/mp4/cenc.cc
index 104948dd4fa..10f3a2a696a 100644
--- a/chromium/media/mp4/cenc.cc
+++ b/chromium/media/mp4/cenc.cc
@@ -42,12 +42,16 @@ bool FrameCENCInfo::Parse(int iv_size, BufferReader* reader) {
return true;
}
-size_t FrameCENCInfo::GetTotalSizeOfSubsamples() const {
+bool FrameCENCInfo::GetTotalSizeOfSubsamples(size_t* total_size) const {
size_t size = 0;
for (size_t i = 0; i < subsamples.size(); i++) {
- size += subsamples[i].clear_bytes + subsamples[i].cypher_bytes;
+ size += subsamples[i].clear_bytes;
+ RCHECK(size >= subsamples[i].clear_bytes); // overflow
+ size += subsamples[i].cypher_bytes;
+ RCHECK(size >= subsamples[i].cypher_bytes); // overflow
}
- return size;
+ *total_size = size;
+ return true;
}
} // namespace mp4
diff --git a/chromium/media/mp4/cenc.h b/chromium/media/mp4/cenc.h
index e558559a939..e42709149f2 100644
--- a/chromium/media/mp4/cenc.h
+++ b/chromium/media/mp4/cenc.h
@@ -21,8 +21,8 @@ struct FrameCENCInfo {
FrameCENCInfo();
~FrameCENCInfo();
- bool Parse(int iv_size, BufferReader* r);
- size_t GetTotalSizeOfSubsamples() const;
+ bool Parse(int iv_size, BufferReader* r) WARN_UNUSED_RESULT;
+ bool GetTotalSizeOfSubsamples(size_t* total_size) const WARN_UNUSED_RESULT;
};
diff --git a/chromium/media/mp4/mp4_stream_parser.cc b/chromium/media/mp4/mp4_stream_parser.cc
index fc4ee8abd1a..26cee44d14e 100644
--- a/chromium/media/mp4/mp4_stream_parser.cc
+++ b/chromium/media/mp4/mp4_stream_parser.cc
@@ -257,7 +257,8 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
audio_config.Initialize(
codec, sample_format, channel_layout, sample_per_second,
extra_data.size() ? &extra_data[0] : NULL, extra_data.size(),
- is_audio_track_encrypted_, false);
+ is_audio_track_encrypted_, false, base::TimeDelta(),
+ base::TimeDelta());
has_audio_ = true;
audio_track_id_ = track->header.track_id;
}
@@ -339,14 +340,14 @@ void MP4StreamParser::EmitNeedKeyIfNecessary(
for (size_t i = 0; i < headers.size(); i++)
total_size += headers[i].raw_box.size();
- scoped_ptr<uint8[]> init_data(new uint8[total_size]);
+ std::vector<uint8> init_data(total_size);
size_t pos = 0;
for (size_t i = 0; i < headers.size(); i++) {
- memcpy(&init_data.get()[pos], &headers[i].raw_box[0],
+ memcpy(&init_data[pos], &headers[i].raw_box[0],
headers[i].raw_box.size());
pos += headers[i].raw_box.size();
}
- need_key_cb_.Run(kMp4InitDataType, init_data.Pass(), total_size);
+ need_key_cb_.Run(kMp4InitDataType, init_data);
}
bool MP4StreamParser::PrepareAVCBuffer(
diff --git a/chromium/media/mp4/mp4_stream_parser_unittest.cc b/chromium/media/mp4/mp4_stream_parser_unittest.cc
index fa880ac38c6..816a2106e39 100644
--- a/chromium/media/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/mp4/mp4_stream_parser_unittest.cc
@@ -94,11 +94,10 @@ class MP4StreamParserTest : public testing::Test {
}
void KeyNeededF(const std::string& type,
- scoped_ptr<uint8[]> init_data, int init_data_size) {
- DVLOG(1) << "KeyNeededF: " << init_data_size;
+ const std::vector<uint8>& init_data) {
+ DVLOG(1) << "KeyNeededF: " << init_data.size();
EXPECT_EQ(kMp4InitDataType, type);
- EXPECT_TRUE(init_data.get());
- EXPECT_GT(init_data_size, 0);
+ EXPECT_FALSE(init_data.empty());
}
scoped_ptr<TextTrack> AddTextTrackF(
diff --git a/chromium/media/mp4/track_run_iterator.cc b/chromium/media/mp4/track_run_iterator.cc
index f16a8bffd1f..95dab69ea4f 100644
--- a/chromium/media/mp4/track_run_iterator.cc
+++ b/chromium/media/mp4/track_run_iterator.cc
@@ -421,9 +421,10 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
const FrameCENCInfo& cenc_info = cenc_info_[sample_idx];
DCHECK(is_encrypted() && !AuxInfoNeedsToBeCached());
+ size_t total_size = 0;
if (!cenc_info.subsamples.empty() &&
- (cenc_info.GetTotalSizeOfSubsamples() !=
- static_cast<size_t>(sample_size()))) {
+ (!cenc_info.GetTotalSizeOfSubsamples(&total_size) ||
+ total_size != static_cast<size_t>(sample_size()))) {
MEDIA_LOG(log_cb_) << "Incorrect CENC subsample size.";
return scoped_ptr<DecryptConfig>();
}
diff --git a/chromium/media/tools/bug_hunter/bug_hunter.py b/chromium/media/tools/bug_hunter/bug_hunter.py
deleted file mode 100755
index 19a2f8f75f8..00000000000
--- a/chromium/media/tools/bug_hunter/bug_hunter.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This script queries the Chromium issue tracker and e-mails the results.
-
-It queries issue tracker using Issue Tracker API. The query
-parameters can be specified by command-line arguments. For example, with the
-following command:
-
- 'python bug_hunter.py -q video Status:Unconfirmed OR audio Status:Unconfirmed
- -s sender@chromium.org -r receiver@chromium.org -v 100 -u days'
-
-You will find all 'Unconfirmed' issues created in the last 100 days containing
-'video' or 'audio' in their content/comments. The content of these issues are
-sent to receiver@chromium.org.
-
-TODO(imasaki): users can specify the interval as say: "100d" for "100 days".
-
-There are two limitations in the current implementation of issue tracker API
-and UI:
-* only outermost OR is valid. For example, the query
- 'video OR audio Status:Unconfirmed' is translated into
- 'video OR (audio AND Status:Unconfirmed)'
-* brackets are not supported. For example, the query
- '(video OR audio) Status:Unconfirmed' does not work.
-
-You need to install following to run this script
- gdata-python-client (http://code.google.com/p/gdata-python-client/)
- rfc3339.py (http://henry.precheur.org/projects/rfc3339)
-
-Links:
-* Chromium issue tracker: http://code.google.com/p/chromium/issues/list
-* Issue tracker API: http://code.google.com/p/support/wiki/IssueTrackerAPI
-* Search tips for the issue tracker:
- http://code.google.com/p/chromium/issues/searchtips
-"""
-
-import csv
-import datetime
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
-import logging
-from operator import itemgetter
-import optparse
-import re
-import smtplib
-import socket
-import sys
-import urllib
-
-try:
- import gdata.data
- import gdata.projecthosting.client
-except ImportError:
- logging.error('gdata-client needs to be installed. Please install\n'
- 'and try again (http://code.google.com/p/gdata-python-client/)')
- sys.exit(1)
-
-try:
- import rfc3339
-except ImportError:
- logging.error('rfc3339 needs to be installed. Please install\n'
- 'and try again (http://henry.precheur.org/projects/rfc3339)')
- sys.exit(1)
-
-# A list of default values.
-_DEFAULT_INTERVAL_UNIT = 'hours'
-_DEFAULT_ISSUE_ELEMENT_IN_EMAIL = ('author', 'status', 'state', 'content',
- 'comments', 'labels', 'urls')
-_DEFAULT_PROJECT_NAME = 'chromium'
-_DEFAULT_QUERY_TITLE = 'potential media bugs'
-_DEFAULT_QUERY = ('video -has:Feature -has:Owner -label:nomedia '
- 'status:Unconfirmed OR audio -has:Feature -has:Owner '
- '-label:nomedia status:Unconfirmed')
-_DEFAULT_OUTPUT_FILENAME = 'output.csv'
-_DETAULT_MAX_COMMENTS = 1000
-
-_INTERVAL_UNIT_CHOICES = ('hours', 'days', 'weeks')
-
-# URLs in this list are excluded from URL extraction from bug
-# content/comments. Each list element should not contain the url ending in
-# '/'. For example, the element should be 'http://www.google.com' but not
-# 'http://www.google.com/'
-_URL_EXCLUSION_LIST = ('http://www.youtube.com/html5',
- 'http://www.google.com')
-_ISSUE_ELEMENT_IN_EMAIL_CHOICES = ('issue_id', 'author', 'status', 'state',
- 'content', 'comments', 'labels', 'urls',
- 'mstone')
-
-
-def ParseArgs():
- """Returns options dictionary from parsed command line arguments."""
- parser = optparse.OptionParser()
-
- parser.add_option('-e', '--email-entries',
- help=('A comma-separated list of issue entries that are '
- 'sent in the email content. '
- 'Possible strings are %s. Default: %%default.' %
- ', '.join(_ISSUE_ELEMENT_IN_EMAIL_CHOICES)),
- default=','.join(_DEFAULT_ISSUE_ELEMENT_IN_EMAIL))
- parser.add_option('-l', '--max-comments',
- help=('The maximum number of comments returned for each '
- 'issue in a reverse chronological order. '
- 'Default: %default.'),
- type='int', default=_DETAULT_MAX_COMMENTS)
- parser.add_option('-o', '--output-filename',
- help=('Filename for result output in CSV format. '
- 'Default: %default.'),
- default=_DEFAULT_OUTPUT_FILENAME, metavar='FILE')
- parser.add_option('-p', '--project-name', default=_DEFAULT_PROJECT_NAME,
- help='Project name string. Default: %default')
- parser.add_option('-q', '--query', default=_DEFAULT_QUERY,
- help=('Query to be used to find bugs. The detail can be '
- 'found in Chromium Issue tracker page '
- 'http://code.google.com/p/chromium/issues/searchtips.'
- ' Default: "%default".'))
- parser.add_option('-r', '--receiver-email-address',
- help="Receiver's email address (Required).")
- parser.add_option('-s', '--sender-email-address',
- help="Sender's email address (Required).")
- parser.add_option('-t', '--query-title',
- default=_DEFAULT_QUERY_TITLE, dest='query_title',
- help=('Query title string used in the subject of the '
- 'result email. Default: %default.'))
- parser.add_option('-u', '--interval_unit', default=_DEFAULT_INTERVAL_UNIT,
- choices=_INTERVAL_UNIT_CHOICES,
- help=('Unit name for |interval_value|. Valid options are '
- '%s. Default: %%default' % (
- ', '.join(_INTERVAL_UNIT_CHOICES))))
- parser.add_option('-v', '--interval-value', type='int',
- help=('Interval value to find bugs. '
- 'The script looks for bugs during '
- 'that interval (up to now). This option is used in '
- 'conjunction with |--interval_unit| option. '
- 'The script looks for all bugs if this is not '
- 'specified.'))
-
- options = parser.parse_args()[0]
-
- options.email_entries = options.email_entries.split(',')
- options.email_entries = [entry for entry in options.email_entries
- if entry in _ISSUE_ELEMENT_IN_EMAIL_CHOICES]
- if not options.email_entries:
- logging.warning('No issue elements in email in option. '
- 'Default email entries will be used.')
- options.email_entries = _DEFAULT_ISSUE_ELEMENT_IN_EMAIL
- logging.info('The following is the issue elements in email: %s ' + (
- ', '.join(options.email_entries)))
- return options
-
-
-class BugHunter(object):
- """This class queries issue trackers and e-mails the results."""
-
- _ISSUE_SEARCH_LINK_BASE = ('http://code.google.com/p/chromium/issues/list?'
- 'can=2&colspec=ID+Pri+Mstone+ReleaseBlock+Area'
- '+Feature+Status+Owner+Summary&cells=tiles'
- '&sort=-id')
- # TODO(imasaki): Convert these into template library.
- _EMAIL_ISSUE_TEMPLATE = ('<li><a href="http://crbug.com/%(issue_id)s">'
- '%(issue_id)s %(title)s</a> ')
- _EMAIL_SUBJECT_TEMPLATE = ('BugHunter found %(n_issues)d %(query_title)s '
- 'bug%(plural)s%(time_msg)s!')
- _EMAIL_MSG_TEMPLATE = ('<a href="%(link_base)s&q=%(unquote_query_text)s">'
- 'Used Query</a>: %(query_text)s<br><br>'
- 'The number of issues : %(n_issues)d<br>'
- '<ul>%(issues)s</ul>')
-
- def __init__(self, options):
- """Sets up initial state for Bug Hunter.
-
- Args:
- options: Command-line options.
- """
- self._client = gdata.projecthosting.client.ProjectHostingClient()
- self._options = options
- self._issue_template = BugHunter._EMAIL_ISSUE_TEMPLATE
- for entry in options.email_entries:
- self._issue_template += '%%(%s)s ' % entry
- self._issue_template += '</li>'
-
- def GetComments(self, issue_id, max_comments):
- """Get comments for a issue.
-
- Args:
- issue_id: Issue id for each issue in the issue tracker.
- max_comments: The maximum number of comments to be returned. The comments
- are returned in a reverse chronological order.
-
- Returns:
- A list of (author name, comments, updated time) tuples.
- """
- comments_feed = self._client.get_comments(self._options.project_name,
- issue_id)
- comment_list = [(comment.content.text, comment.author[0].name.text,
- comment.updated.text)
- for comment
- in list(reversed(comments_feed.entry))[0:max_comments]]
- return comment_list
-
- def GetIssues(self):
- """Get issues from issue tracker and return them.
-
- Returns:
- A list of issues in descending order by issue_id. Each element in the
- list is a dictionary where the keys are 'issue_id', 'title', 'author',
- 'status', 'state', 'content', 'comments', 'labels', 'urls'.
- Returns an empty list when there is no matching issue.
- """
- min_time = None
- if self._options.interval_value:
- # Issue Tracker Data API uses RFC 3339 timestamp format, For example:
- # 2005-08-09T10:57:00-08:00
- # (http://code.google.com/p/support/wiki/IssueTrackerAPIPython)
- delta = datetime.timedelta(
- **{self._options.interval_unit: self._options.interval_value})
- dt = datetime.datetime.now() - delta
- min_time = rfc3339.rfc3339(dt)
-
- query = gdata.projecthosting.client.Query(text_query=self._options.query,
- max_results=1000,
- published_min=min_time)
-
- feed = self._client.get_issues(self._options.project_name, query=query)
- if not feed.entry:
- logging.info('No issues available to match query %s.',
- self._options.query)
- return []
- issues = []
- for entry in feed.entry:
- # The fully qualified id is a URL. We just want the number.
- issue_id = entry.id.text.split('/')[-1]
- if not issue_id.isdigit():
- logging.warning('Issue_id is not correct: %s. Skipping.', issue_id)
- continue
- label_list = [label.text for label in entry.label]
- comments = ''
- if 'comments' in self._options.email_entries:
- comments = ''.join(
- [''.join(comment) if not comment else ''
- for comment
- in self.GetComments(issue_id, self._options.max_comments)])
- content = BugHunterUtils.StripHTML(entry.content.text)
- url_list = list(
- set(re.findall(r'(https?://\S+)', content + comments)))
- url_list = [url for url in url_list
- if not url.rstrip('/') in _URL_EXCLUSION_LIST]
- mstone = ''
- r = re.compile(r'Mstone-(\d*)')
- for label in label_list:
- m = r.search(label)
- if m:
- mstone = m.group(1)
- issues.append(
- {'issue_id': issue_id, 'title': entry.title.text,
- 'author': entry.author[0].name.text,
- 'status': entry.status.text if entry.status is not None else '',
- 'state': entry.state.text if entry.state is not None else '',
- 'content': content, 'mstone': mstone, 'comments': comments,
- 'labels': label_list, 'urls': url_list})
- return sorted(issues, key=itemgetter('issue_id'), reverse=True)
-
- def _SetUpEmailSubjectMsg(self, issues):
- """Set up email subject and its content.
-
- Args:
- issues: Please refer to the return value in GetIssues().
-
- Returns:
- A tuple of two strings (email subject and email content).
- """
- time_msg = ''
- if self._options.interval_value:
- time_msg = ' in the past %s %s%s' % (
- self._options.interval_value, self._options.interval_unit[:-1],
- 's' if self._options.interval_value > 1 else '')
- subject = BugHunter._EMAIL_SUBJECT_TEMPLATE % {
- 'n_issues': len(issues),
- 'query_title': self._options.query_title,
- 'plural': 's' if len(issues) > 1 else '',
- 'time_msg': time_msg}
- content = BugHunter._EMAIL_MSG_TEMPLATE % {
- 'link_base': BugHunter._ISSUE_SEARCH_LINK_BASE,
- 'unquote_query_text': urllib.quote(self._options.query),
- 'query_text': self._options.query,
- 'n_issues': len(issues),
- 'issues': ''.join(
- [self._issue_template % issue for issue in issues])}
- return (subject, content)
-
- def SendResultEmail(self, issues):
- """Send result email.
-
- Args:
- issues: Please refer to the return value in GetIssues().
- """
- subject, content = self._SetUpEmailSubjectMsg(issues)
- BugHunterUtils.SendEmail(
- content, self._options.sender_email_address,
- self._options.receiver_email_address, subject)
-
- def WriteIssuesToFileInCSV(self, issues, filename):
- """Write issues to a file in CSV format.
-
- Args:
- issues: Please refer to the return value in GetIssues().
- filename: File name for CSV file.
- """
- with open(filename, 'w') as f:
- writer = csv.writer(f)
- # Write header first.
- writer.writerow(issues[0].keys())
- for issue in issues:
- writer.writerow(
- [unicode(value).encode('utf-8') for value in issue.values()])
-
-
-class BugHunterUtils(object):
- """Utility class for Bug Hunter."""
-
- @staticmethod
- def StripHTML(string_with_html):
- """Strip HTML tags from string.
-
- Args:
- string_with_html: A string with HTML tags.
-
- Returns:
- A string without HTML tags.
- """
- return re.sub('<[^<]+?>', '', string_with_html)
-
- @staticmethod
- def SendEmail(message, sender_email_address, receivers_email_address,
- subject):
- """Send email using localhost's mail server.
-
- Args:
- message: Email message to be sent.
- sender_email_address: Sender's email address.
- receivers_email_address: Receiver's email address.
- subject: Email subject.
-
- Returns:
- True if successful; False, otherwise.
- """
- try:
- html = '<html><head></head><body>%s</body></html>' % message
- msg = MIMEMultipart('alternative')
- msg['Subject'] = subject
- msg['From'] = sender_email_address
- msg['To'] = receivers_email_address
- msg.attach(MIMEText(html.encode('utf-8'), 'html', _charset='utf-8'))
- smtp_obj = smtplib.SMTP('localhost')
- smtp_obj.sendmail(sender_email_address, receivers_email_address,
- msg.as_string())
- logging.info('Successfully sent email.')
- smtp_obj.quit()
- return True
- except smtplib.SMTPException:
- logging.exception('Authentication failed, unable to send email.')
- except (socket.gaierror, socket.error, socket.herror):
- logging.exception('Unable to send email.')
- return False
-
-
-def Main():
- ops = ParseArgs()
- bh = BugHunter(ops)
- issues = bh.GetIssues()
- if issues and ops.sender_email_address and ops.receiver_email_address:
- bh.SendResultEmail(issues)
- if issues:
- bh.WriteIssuesToFileInCSV(issues, ops.output_filename)
-
-
-if __name__ == '__main__':
- Main()
diff --git a/chromium/media/tools/bug_hunter/bug_hunter_test.py b/chromium/media/tools/bug_hunter/bug_hunter_test.py
deleted file mode 100644
index 0dafd8ab537..00000000000
--- a/chromium/media/tools/bug_hunter/bug_hunter_test.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Integration tests for bug hunter."""
-
-import csv
-from optparse import Values
-import os
-import unittest
-
-from bug_hunter import BugHunter
-
-try:
- import gdata.data
- import gdata.projecthosting.client
-except ImportError:
- logging.error('gdata-client needs to be installed. Please install\n'
- 'and try again (http://code.google.com/p/gdata-python-client/)')
- sys.exit(1)
-
-
-class BugHunterTest(unittest.TestCase):
- """Unit tests for the Bug Hunter class."""
- _TEST_FILENAME = 'test.csv'
-
- def _CleanTestFile(self):
- if os.path.exists(self._TEST_FILENAME):
- os.remove(self._TEST_FILENAME)
-
- def setUp(self):
- self._CleanTestFile()
-
- def tearDown(self):
- self._CleanTestFile()
-
- def _GetIssue(self):
- return [{'issue_id': '0', 'title': 'title', 'author': 'author',
- 'status': 'status', 'state': 'state', 'content': 'content',
- 'comments': [], 'labels': [], 'urls': []}]
-
- def _GetDefaultOption(self, set_10_days_ago, query='steps'):
- ops = Values()
- ops.query = query
- if set_10_days_ago:
- ops.interval_value = 10
- ops.interval_unit = 'days'
- else:
- ops.interval_value = None
- ops.email_entries = ['comments']
- ops.project_name = 'chromium'
- ops.query_title = 'query title'
- ops.max_comments = None
- return ops
-
- def testGetIssueReturnedIssue(self):
- bh = BugHunter(
- self._GetDefaultOption(False,
- query=('audio opened-after:2010/10/10'
- ' opened-before:2010/10/20')))
- self.assertEquals(len(bh.GetIssues()), 18)
-
- def testGetIssueReturnedIssueWithStatus(self):
- ops = self._GetDefaultOption(False)
- ops.query = 'Feature:Media* Status:Unconfirmed'
- issues = BugHunter(ops).GetIssues()
- for issue in issues:
- self.assertEquals(issue['status'], 'Unconfirmed')
-
- def testGetIssueReturnNoIssue(self):
- ops = self._GetDefaultOption(True)
- ops.query = 'thisshouldnotmatchpleaseignorethis*'
- self.assertFalse(BugHunter(ops).GetIssues())
-
- def testGetComments(self):
- comments = BugHunter(self._GetDefaultOption(False)).GetComments(100000, 2)
- self.assertEquals(len(comments), 2)
- expected_comments = [(None, 'rby...@chromium.org',
- '2011-10-31T19:54:40.000Z'),
- (None, 'backer@chromium.org',
- '2011-10-14T13:59:37.000Z')]
- self.assertEquals(comments, expected_comments)
-
- def testWriteIssuesToFileInCSV(self):
- ops = self._GetDefaultOption(False)
- bh = BugHunter(ops)
- bh.WriteIssuesToFileInCSV(self._GetIssue(), self._TEST_FILENAME)
-
- with open(self._TEST_FILENAME, 'r') as f:
- reader = csv.reader(f)
- self.assertEquals(reader.next(), ['status', 'content', 'state',
- 'issue_id', 'urls', 'title', 'labels',
- 'author', 'comments'])
- self.assertEquals(reader.next(), ['status', 'content', 'state', '0',
- '[]', 'title', '[]', 'author', '[]'])
- self.assertRaises(StopIteration, reader.next)
diff --git a/chromium/media/tools/bug_hunter/bug_hunter_unittest.py b/chromium/media/tools/bug_hunter/bug_hunter_unittest.py
deleted file mode 100644
index 0cb11b63bb2..00000000000
--- a/chromium/media/tools/bug_hunter/bug_hunter_unittest.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Unit Tests for bug hunter."""
-
-import logging
-from optparse import Values
-import smtplib
-import sys
-import unittest
-
-from bug_hunter import BugHunter
-from bug_hunter import BugHunterUtils
-
-try:
- import atom.data
- import gdata.data
- import gdata.projecthosting.client
-except ImportError:
- logging.error('gdata-client needs to be installed. Please install\n'
- 'and try again (http://code.google.com/p/gdata-python-client/)')
- sys.exit(1)
-
-
-class MockClient(object):
- """A mock class for gdata.projecthosting.client.ProjectHostingClient.
-
- Mocking the very simple method invocations for get_issues() and
- get_comments().
- """
-
- def _CreateIssues(self, n_issues):
- feed = gdata.projecthosting.data.IssuesFeed()
- for i in xrange(n_issues):
- feed.entry.append(gdata.projecthosting.data.IssueEntry(
- title=atom.data.Title(text='title'),
- content=atom.data.Content(text='http://www.content.com'),
- id=atom.data.Id(text='/' + str(i)),
- status=gdata.projecthosting.data.Status(text='Unconfirmed'),
- state=gdata.projecthosting.data.State(text='open'),
- label=[gdata.projecthosting.data.Label('label1')],
- author=[atom.data.Author(name=atom.data.Name(text='author'))]))
- return feed
-
- def get_issues(self, project_name, query):
- """Get issues using mock object without calling the issue tracker API.
-
- Based on query argument, this returns the dummy issues. The number of
- dummy issues are specified in query.text_query.
-
- Args:
- project_name: A string for project name in the issue tracker.
- query: A query object for querying the issue tracker.
-
- Returns:
- A IssuesFeed object that contains a simple test issue.
- """
- n_issues = 1
- if query.text_query.isdigit():
- n_issues = int(query.text_query)
- return self._CreateIssues(n_issues)
-
- def get_comments(self, project_name, issue_id):
- """Get comments using mock object without calling the issue tracker API.
-
- Args:
- project_name: A string for project name in the issue tracker.
- issue_id: Issue_id string.
-
- Returns:
- A CommentsFeed object that contains a simple test comment.
- """
- feed = gdata.projecthosting.data.CommentsFeed()
- feed.entry = [gdata.projecthosting.data.CommentEntry(
- id=atom.data.Id(text='/0'),
- content=atom.data.Content(text='http://www.comments.com'),
- updated=atom.data.Updated(text='Updated'),
- author=[atom.data.Author(name=atom.data.Name(text='cauthor'))])]
- return feed
-
-
-class BugHunterUnitTest(unittest.TestCase):
- """Unit tests for the Bug Hunter class."""
-
- def setUp(self):
- self._old_client = gdata.projecthosting.client.ProjectHostingClient
- gdata.projecthosting.client.ProjectHostingClient = MockClient
-
- def tearDown(self):
- gdata.projecthosting.client.ProjectHostingClient = self._old_client
-
- def _GetDefaultOption(self, set_10_days_ago, query='steps'):
- ops = Values()
- ops.query = query
- if set_10_days_ago:
- ops.interval_value = 10
- ops.interval_unit = 'days'
- else:
- ops.interval_value = None
- ops.email_entries = ['comments']
- ops.project_name = 'chromium'
- ops.query_title = 'query title'
- ops.max_comments = None
- return ops
-
- def _GetIssue(self, n_issues):
- issues = []
- for i in xrange(n_issues):
- issues.append({'issue_id': str(i), 'title': 'title', 'author': 'author',
- 'status': 'status', 'state': 'state',
- 'content': 'content', 'comments': [],
- 'labels': [], 'urls': []})
- return issues
-
- def testSetUpEmailSubjectMsg(self):
- bh = BugHunter(self._GetDefaultOption(False))
- subject, content = bh._SetUpEmailSubjectMsg(self._GetIssue(1))
- self.assertEquals(subject,
- 'BugHunter found 1 query title bug!')
- self.assertEquals(content,
- ('<a href="http://code.google.com/p/chromium/issues/'
- 'list?can=2&colspec=ID+Pri+Mstone+ReleaseBlock+Area+'
- 'Feature+Status+Owner+Summary&cells=tiles&sort=-id&'
- 'q=steps">Used Query</a>: steps<br><br>The number of '
- 'issues : 1<br><ul><li><a href="http://crbug.com/0">0 '
- 'title</a> [] </li></ul>'))
-
- def testSetUpEmailSubjectMsgMultipleIssues(self):
- bh = BugHunter(self._GetDefaultOption(False))
- subject, content = bh._SetUpEmailSubjectMsg(self._GetIssue(2))
- self.assertEquals(subject,
- 'BugHunter found 2 query title bugs!')
-
- def testSetUpEmailSubjectMsgWith10DaysAgoAndAssertSubject(self):
- bh = BugHunter(self._GetDefaultOption(True))
- subject, _ = bh._SetUpEmailSubjectMsg(self._GetIssue(1))
- self.assertEquals(subject,
- ('BugHunter found 1 query title bug in the past 10 '
- 'days!'))
-
- def testGetIssuesWithMockClient(self):
- bh = BugHunter(self._GetDefaultOption(False,
- query=('dummy')))
- expected_issues = [{'issue_id': '0', 'title': 'title', 'author': 'author',
- 'status': 'Unconfirmed', 'state': 'open',
- 'content': 'http://www.content.com',
- 'comments': '', 'labels': ['label1'],
- 'urls': ['http://www.content.com']}]
- self.assertEquals(expected_issues, bh.GetIssues())
-
-
-class MockSmtp(object):
- """A mock class for SMTP."""
-
- def __init__(self, server):
- pass
-
- def sendmail(self, sender_email_address, receivers_email_addresses,
- msg):
- # TODO(imasaki): Do something here.
- return True
-
- def quit(self):
- pass
-
-
-class BugHunterUtilsTest(unittest.TestCase):
- """Unit tests for the Bug Hunter utility."""
-
- def testStripHTML(self):
- self.assertEquals(BugHunterUtils.StripHTML('<p>X</p>'), 'X')
-
- def testStripHTMLEmpty(self):
- self.assertEquals(BugHunterUtils.StripHTML(''), '')
-
- def testSendEmail(self):
- smtplib.SMTP = MockSmtp
- self.assertEqual(BugHunterUtils.SendEmail('message', 'sender_email_address',
- 'receivers_email_addresses',
- 'subject'),
- True)
diff --git a/chromium/media/tools/demuxer_bench/demuxer_bench.cc b/chromium/media/tools/demuxer_bench/demuxer_bench.cc
index d38e5877433..ab8b313c435 100644
--- a/chromium/media/tools/demuxer_bench/demuxer_bench.cc
+++ b/chromium/media/tools/demuxer_bench/demuxer_bench.cc
@@ -48,8 +48,8 @@ void QuitLoopWithStatus(base::MessageLoop* message_loop,
message_loop->PostTask(FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}
-static void NeedKey(const std::string& type, scoped_ptr<uint8[]> init_data,
- int init_data_size) {
+static void NeedKey(const std::string& type,
+ const std::vector<uint8>& init_data) {
LOG(INFO) << "File is encrypted.";
}
@@ -194,7 +194,7 @@ int main(int argc, char** argv) {
media::FileDataSource data_source;
CHECK(data_source.Initialize(file_path));
- media::FFmpegNeedKeyCB need_key_cb = base::Bind(&NeedKey);
+ media::Demuxer::NeedKeyCB need_key_cb = base::Bind(&NeedKey);
media::FFmpegDemuxer demuxer(message_loop.message_loop_proxy(),
&data_source,
need_key_cb,
diff --git a/chromium/media/tools/media_bench/media_bench.cc b/chromium/media/tools/media_bench/media_bench.cc
deleted file mode 100644
index 4214988b6ee..00000000000
--- a/chromium/media/tools/media_bench/media_bench.cc
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Standalone benchmarking application based on FFmpeg. This tool is used to
-// measure decoding performance between different FFmpeg compile and run-time
-// options. We also use this tool to measure performance regressions when
-// testing newer builds of FFmpeg from trunk.
-
-#include <iomanip>
-#include <iostream>
-#include <string>
-
-#include "base/at_exit.h"
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/file_util.h"
-#include "base/files/file_path.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/logging.h"
-#include "base/md5.h"
-#include "base/path_service.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "media/base/djb2.h"
-#include "media/base/media.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/filters/ffmpeg_glue.h"
-#include "media/filters/ffmpeg_video_decoder.h"
-#include "media/filters/in_memory_url_protocol.h"
-
-// For pipe _setmode to binary
-#if defined(OS_WIN)
-#include <fcntl.h>
-#include <io.h>
-#endif
-
-namespace switches {
-const char kStream[] = "stream";
-const char kVideoThreads[] = "video-threads";
-const char kFast2[] = "fast2";
-const char kErrorCorrection[] = "error-correction";
-const char kSkip[] = "skip";
-const char kFlush[] = "flush";
-const char kDjb2[] = "djb2";
-const char kMd5[] = "md5";
-const char kFrames[] = "frames";
-const char kLoop[] = "loop";
-
-} // namespace switches
-
-#if defined(OS_WIN)
-
-// Enable to build with exception handler
-// #define ENABLE_WINDOWS_EXCEPTIONS 1
-
-#ifdef ENABLE_WINDOWS_EXCEPTIONS
-// warning: disable warning about exception handler.
-#pragma warning(disable:4509)
-#endif
-
-// Thread priorities to make benchmark more stable.
-
-void EnterTimingSection() {
- SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
-}
-
-void LeaveTimingSection() {
- SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_NORMAL);
-}
-#else
-void EnterTimingSection() {
- pthread_attr_t pta;
- struct sched_param param;
-
- pthread_attr_init(&pta);
- memset(&param, 0, sizeof(param));
- param.sched_priority = 78;
- pthread_attr_setschedparam(&pta, &param);
- pthread_attr_destroy(&pta);
-}
-
-void LeaveTimingSection() {
-}
-#endif
-
-int main(int argc, const char** argv) {
- base::AtExitManager exit_manager;
-
- CommandLine::Init(argc, argv);
-
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- logging::InitLogging(settings);
-
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- const CommandLine::StringVector& filenames = cmd_line->GetArgs();
- if (filenames.empty()) {
- std::cerr << "Usage: " << argv[0] << " [OPTIONS] FILE [DUMPFILE]\n"
- << " --stream=[audio|video] "
- << "Benchmark either the audio or video stream\n"
- << " --video-threads=N "
- << "Decode video using N threads\n"
- << " --frames=N "
- << "Decode N frames\n"
- << " --loop=N "
- << "Loop N times\n"
- << " --fast2 "
- << "Enable fast2 flag\n"
- << " --error-correction "
- << "Enable ffmpeg error correction\n"
- << " --flush "
- << "Flush last frame\n"
- << " --djb2 (aka --hash) "
- << "Hash decoded buffers (DJB2)\n"
- << " --md5 "
- << "Hash decoded buffers (MD5)\n"
- << " --skip=[1|2|3] "
- << "1=loop nonref, 2=loop, 3= frame nonref\n" << std::endl;
- return 1;
- }
-
- // Initialize our media library (try loading DLLs, etc.) before continuing.
- base::FilePath media_path;
- PathService::Get(base::DIR_MODULE, &media_path);
- if (!media::InitializeMediaLibrary(media_path)) {
- std::cerr << "Unable to initialize the media library." << std::endl;
- return 1;
- }
-
- // Retrieve command line options.
- base::FilePath in_path(filenames[0]);
- base::FilePath out_path;
- if (filenames.size() > 1)
- out_path = base::FilePath(filenames[1]);
- AVMediaType target_codec = AVMEDIA_TYPE_UNKNOWN;
-
- // Determine whether to benchmark audio or video decoding.
- std::string stream(cmd_line->GetSwitchValueASCII(switches::kStream));
- if (!stream.empty()) {
- if (stream.compare("audio") == 0) {
- target_codec = AVMEDIA_TYPE_AUDIO;
- } else if (stream.compare("video") == 0) {
- target_codec = AVMEDIA_TYPE_VIDEO;
- } else {
- std::cerr << "Unknown --stream option " << stream << std::endl;
- return 1;
- }
- }
-
- // Determine number of threads to use for video decoding (optional).
- int video_threads = 0;
- std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
- if (!threads.empty() &&
- !base::StringToInt(threads, &video_threads)) {
- video_threads = 0;
- }
-
- // Determine number of frames to decode (optional).
- int max_frames = 0;
- std::string frames_opt(cmd_line->GetSwitchValueASCII(switches::kFrames));
- if (!frames_opt.empty() &&
- !base::StringToInt(frames_opt, &max_frames)) {
- max_frames = 0;
- }
-
- // Determine number of times to loop (optional).
- int max_loops = 0;
- std::string loop_opt(cmd_line->GetSwitchValueASCII(switches::kLoop));
- if (!loop_opt.empty() &&
- !base::StringToInt(loop_opt, &max_loops)) {
- max_loops = 0;
- }
-
- bool fast2 = false;
- if (cmd_line->HasSwitch(switches::kFast2)) {
- fast2 = true;
- }
-
- bool error_correction = false;
- if (cmd_line->HasSwitch(switches::kErrorCorrection)) {
- error_correction = true;
- }
-
- bool flush = false;
- if (cmd_line->HasSwitch(switches::kFlush)) {
- flush = true;
- }
-
- unsigned int hash_value = 5381u; // Seed for DJB2.
- bool hash_djb2 = false;
- if (cmd_line->HasSwitch(switches::kDjb2)) {
- hash_djb2 = true;
- }
-
- base::MD5Context ctx; // Intermediate MD5 data: do not use
- base::MD5Init(&ctx);
- bool hash_md5 = false;
- if (cmd_line->HasSwitch(switches::kMd5))
- hash_md5 = true;
-
- int skip = 0;
- if (cmd_line->HasSwitch(switches::kSkip)) {
- std::string skip_opt(cmd_line->GetSwitchValueASCII(switches::kSkip));
- if (!base::StringToInt(skip_opt, &skip)) {
- skip = 0;
- }
- }
-
- std::ostream* log_out = &std::cout;
-#if defined(ENABLE_WINDOWS_EXCEPTIONS)
- // Catch exceptions so this tool can be used in automated testing.
- __try {
-#endif
-
- base::MemoryMappedFile file_data;
- file_data.Initialize(in_path);
- media::InMemoryUrlProtocol protocol(
- file_data.data(), file_data.length(), false);
-
- // Register FFmpeg and attempt to open file.
- media::FFmpegGlue glue(&protocol);
- if (!glue.OpenContext()) {
- std::cerr << "Error: Could not open input for "
- << in_path.value() << std::endl;
- return 1;
- }
-
- AVFormatContext* format_context = glue.format_context();
-
- // Open output file.
- FILE *output = NULL;
- if (!out_path.empty()) {
- // TODO(fbarchard): Add pipe:1 for piping to stderr.
- if (out_path.value().substr(0, 5) == FILE_PATH_LITERAL("pipe:") ||
- out_path.value() == FILE_PATH_LITERAL("-")) {
- output = stdout;
- log_out = &std::cerr;
-#if defined(OS_WIN)
- _setmode(_fileno(stdout), _O_BINARY);
-#endif
- } else {
- output = file_util::OpenFile(out_path, "wb");
- }
- if (!output) {
- std::cerr << "Error: Could not open output "
- << out_path.value() << std::endl;
- return 1;
- }
- }
-
- // Parse a little bit of the stream to fill out the format context.
- if (avformat_find_stream_info(format_context, NULL) < 0) {
- std::cerr << "Error: Could not find stream info for "
- << in_path.value() << std::endl;
- return 1;
- }
-
- // Find our target stream.
- int target_stream = -1;
- for (size_t i = 0; i < format_context->nb_streams; ++i) {
- AVCodecContext* codec_context = format_context->streams[i]->codec;
- AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
-
- // See if we found our target codec.
- if (codec_context->codec_type == target_codec && target_stream < 0) {
- *log_out << "* ";
- target_stream = i;
- } else {
- *log_out << " ";
- }
-
- if (!codec || (codec_context->codec_type == AVMEDIA_TYPE_UNKNOWN)) {
- *log_out << "Stream #" << i << ": Unknown" << std::endl;
- } else {
- // Print out stream information
- *log_out << "Stream #" << i << ": " << codec->name << " ("
- << codec->long_name << ")" << std::endl;
- }
- }
-
- // Only continue if we found our target stream.
- if (target_stream < 0) {
- std::cerr << "Error: Could not find target stream "
- << target_stream << " for " << in_path.value() << std::endl;
- return 1;
- }
-
- // Prepare FFmpeg structures.
- AVPacket packet;
- AVCodecContext* codec_context = format_context->streams[target_stream]->codec;
- AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
-
- // Only continue if we found our codec.
- if (!codec) {
- std::cerr << "Error: Could not find codec for "
- << in_path.value() << std::endl;
- return 1;
- }
-
- if (skip == 1) {
- codec_context->skip_loop_filter = AVDISCARD_NONREF;
- } else if (skip == 2) {
- codec_context->skip_loop_filter = AVDISCARD_ALL;
- } else if (skip == 3) {
- codec_context->skip_loop_filter = AVDISCARD_ALL;
- codec_context->skip_frame = AVDISCARD_NONREF;
- }
- if (fast2) {
- // Note this flag is no longer necessary for H264 multithreading.
- codec_context->flags2 |= CODEC_FLAG2_FAST;
- }
- if (error_correction) {
- codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
- }
-
- // Initialize threaded decode.
- if (target_codec == AVMEDIA_TYPE_VIDEO && video_threads > 0) {
- codec_context->thread_count = video_threads;
- }
-
- // Initialize our codec.
- if (avcodec_open2(codec_context, codec, NULL) < 0) {
- std::cerr << "Error: Could not open codec "
- << (codec_context->codec ? codec_context->codec->name : "(NULL)")
- << " for " << in_path.value() << std::endl;
- return 1;
- }
-
- // Buffer used for audio decoding.
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> audio_frame(
- avcodec_alloc_frame());
- if (!audio_frame) {
- std::cerr << "Error: avcodec_alloc_frame for "
- << in_path.value() << std::endl;
- return 1;
- }
-
- // Buffer used for video decoding.
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_frame(
- avcodec_alloc_frame());
- if (!video_frame) {
- std::cerr << "Error: avcodec_alloc_frame for "
- << in_path.value() << std::endl;
- return 1;
- }
-
- // Remember size of video.
- int video_width = codec_context->width;
- int video_height = codec_context->height;
-
- // Stats collector.
- EnterTimingSection();
- std::vector<double> decode_times;
- decode_times.reserve(4096);
- // Parse through the entire stream until we hit EOF.
- base::TimeTicks start = base::TimeTicks::HighResNow();
- int frames = 0;
- int read_result = 0;
- do {
- read_result = av_read_frame(format_context, &packet);
-
- if (read_result < 0) {
- if (max_loops) {
- --max_loops;
- }
- if (max_loops > 0) {
- av_seek_frame(format_context, -1, 0, AVSEEK_FLAG_BACKWARD);
- read_result = 0;
- continue;
- }
- if (flush) {
- packet.stream_index = target_stream;
- packet.size = 0;
- } else {
- break;
- }
- }
-
- // Only decode packets from our target stream.
- if (packet.stream_index == target_stream) {
- int result = -1;
- if (target_codec == AVMEDIA_TYPE_AUDIO) {
- int size_out = 0;
- int got_audio = 0;
-
- avcodec_get_frame_defaults(audio_frame.get());
-
- base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_audio4(codec_context, audio_frame.get(),
- &got_audio, &packet);
- base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
-
- if (got_audio) {
- size_out = av_samples_get_buffer_size(
- NULL, codec_context->channels, audio_frame->nb_samples,
- codec_context->sample_fmt, 1);
- }
-
- if (got_audio && size_out) {
- decode_times.push_back(delta.InMillisecondsF());
- ++frames;
- read_result = 0; // Force continuation.
-
- if (output) {
- if (fwrite(audio_frame->data[0], 1, size_out, output) !=
- static_cast<size_t>(size_out)) {
- std::cerr << "Error: Could not write "
- << size_out << " bytes for " << in_path.value()
- << std::endl;
- return 1;
- }
- }
-
- const uint8* u8_samples =
- reinterpret_cast<const uint8*>(audio_frame->data[0]);
- if (hash_djb2) {
- hash_value = DJB2Hash(u8_samples, size_out, hash_value);
- }
- if (hash_md5) {
- base::MD5Update(
- &ctx,
- base::StringPiece(reinterpret_cast<const char*>(u8_samples),
- size_out));
- }
- }
- } else if (target_codec == AVMEDIA_TYPE_VIDEO) {
- int got_picture = 0;
-
- avcodec_get_frame_defaults(video_frame.get());
-
- base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_video2(codec_context, video_frame.get(),
- &got_picture, &packet);
- base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
-
- if (got_picture) {
- decode_times.push_back(delta.InMillisecondsF());
- ++frames;
- read_result = 0; // Force continuation.
-
- for (int plane = 0; plane < 3; ++plane) {
- const uint8* source = video_frame->data[plane];
- const size_t source_stride = video_frame->linesize[plane];
- size_t bytes_per_line = codec_context->width;
- size_t copy_lines = codec_context->height;
- if (plane != 0) {
- switch (codec_context->pix_fmt) {
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
- bytes_per_line /= 2;
- copy_lines = (copy_lines + 1) / 2;
- break;
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUVJ422P:
- bytes_per_line /= 2;
- break;
- case PIX_FMT_YUV444P:
- case PIX_FMT_YUVJ444P:
- break;
- default:
- std::cerr << "Error: Unknown video format "
- << codec_context->pix_fmt;
- return 1;
- }
- }
- if (output) {
- for (size_t i = 0; i < copy_lines; ++i) {
- if (fwrite(source, 1, bytes_per_line, output) !=
- bytes_per_line) {
- std::cerr << "Error: Could not write data after "
- << copy_lines << " lines for "
- << in_path.value() << std::endl;
- return 1;
- }
- source += source_stride;
- }
- }
- if (hash_djb2) {
- for (size_t i = 0; i < copy_lines; ++i) {
- hash_value = DJB2Hash(source, bytes_per_line, hash_value);
- source += source_stride;
- }
- }
- if (hash_md5) {
- for (size_t i = 0; i < copy_lines; ++i) {
- base::MD5Update(
- &ctx,
- base::StringPiece(reinterpret_cast<const char*>(source),
- bytes_per_line));
- source += source_stride;
- }
- }
- }
- }
- } else {
- NOTREACHED();
- }
-
- // Make sure our decoding went OK.
- if (result < 0) {
- std::cerr << "Error: avcodec_decode returned "
- << result << " for " << in_path.value() << std::endl;
- return 1;
- }
- }
- // Free our packet.
- av_free_packet(&packet);
-
- if (max_frames && (frames >= max_frames))
- break;
- } while (read_result >= 0);
- base::TimeDelta total = base::TimeTicks::HighResNow() - start;
- LeaveTimingSection();
-
- // Clean up.
- if (output)
- file_util::CloseFile(output);
-
- // Calculate the sum of times. Note that some of these may be zero.
- double sum = 0;
- for (size_t i = 0; i < decode_times.size(); ++i) {
- sum += decode_times[i];
- }
-
- double average = 0;
- double stddev = 0;
- double fps = 0;
- if (frames > 0) {
- // Calculate the average time per frame.
- average = sum / frames;
-
- // Calculate the sum of the squared differences.
- // Standard deviation will only be accurate if no threads are used.
- // TODO(fbarchard): Rethink standard deviation calculation.
- double squared_sum = 0;
- for (int i = 0; i < frames; ++i) {
- double difference = decode_times[i] - average;
- squared_sum += difference * difference;
- }
-
- // Calculate the standard deviation (jitter).
- stddev = sqrt(squared_sum / frames);
-
- // Calculate frames per second.
- fps = frames * 1000.0 / sum;
- }
-
- // Print our results.
- log_out->setf(std::ios::fixed);
- log_out->precision(2);
- *log_out << std::endl;
- *log_out << " Frames:" << std::setw(11) << frames << std::endl;
- *log_out << " Width:" << std::setw(11) << video_width << std::endl;
- *log_out << " Height:" << std::setw(11) << video_height << std::endl;
- *log_out << " Total:" << std::setw(11) << total.InMillisecondsF()
- << " ms" << std::endl;
- *log_out << " Summation:" << std::setw(11) << sum
- << " ms" << std::endl;
- *log_out << " Average:" << std::setw(11) << average
- << " ms" << std::endl;
- *log_out << " StdDev:" << std::setw(11) << stddev
- << " ms" << std::endl;
- *log_out << " FPS:" << std::setw(11) << fps
- << std::endl;
- if (hash_djb2) {
- *log_out << " DJB2 Hash:" << std::setw(11) << hash_value
- << " " << in_path.value() << std::endl;
- }
- if (hash_md5) {
- base::MD5Digest digest; // The result of the computation.
- base::MD5Final(&digest, &ctx);
- *log_out << " MD5 Hash: " << base::MD5DigestToBase16(digest)
- << " " << in_path.value() << std::endl;
- }
-#if defined(ENABLE_WINDOWS_EXCEPTIONS)
- } __except(EXCEPTION_EXECUTE_HANDLER) {
- *log_out << " Exception:" << std::setw(11) << GetExceptionCode()
- << " " << in_path.value() << std::endl;
- return 1;
- }
-#endif
- CommandLine::Reset();
- return 0;
-}
diff --git a/chromium/media/tools/player_x11/player_x11.cc b/chromium/media/tools/player_x11/player_x11.cc
index cef891247a6..c154e6937e2 100644
--- a/chromium/media/tools/player_x11/player_x11.cc
+++ b/chromium/media/tools/player_x11/player_x11.cc
@@ -95,8 +95,8 @@ void Paint(base::MessageLoop* message_loop, const PaintCB& paint_cb,
static void OnBufferingState(media::Pipeline::BufferingState buffering_state) {}
-static void NeedKey(const std::string& type, scoped_ptr<uint8[]> init_data,
- int init_data_size) {
+static void NeedKey(const std::string& type,
+ const std::vector<uint8>& init_data) {
std::cout << "File is encrypted." << std::endl;
}
diff --git a/chromium/media/tools/seek_tester/seek_tester.cc b/chromium/media/tools/seek_tester/seek_tester.cc
deleted file mode 100644
index d3f6a35044e..00000000000
--- a/chromium/media/tools/seek_tester/seek_tester.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// This standalone binary is a helper for diagnosing seek behavior of the
-// demuxer setup in media/ code. It answers the question: "if I ask the demuxer
-// to Seek to X ms, where will it actually seek to? (necessitating
-// frame-dropping until the original seek target is reached)". Sample run:
-//
-// $ ./out/Debug/seek_tester .../LayoutTests/media/content/test.ogv 6300
-// [0207/130327:INFO:seek_tester.cc(63)] Requested: 6123ms
-// [0207/130327:INFO:seek_tester.cc(68)] audio seeked to: 5526ms
-// [0207/130327:INFO:seek_tester.cc(74)] video seeked to: 5577ms
-
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/files/file_path.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_number_conversions.h"
-#include "media/base/media.h"
-#include "media/base/media_log.h"
-#include "media/filters/ffmpeg_demuxer.h"
-#include "media/filters/file_data_source.h"
-
-class DemuxerHostImpl : public media::DemuxerHost {
- public:
- // DataSourceHost implementation.
- virtual void SetTotalBytes(int64 total_bytes) OVERRIDE {}
- virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE {}
- virtual void AddBufferedTimeRange(base::TimeDelta start,
- base::TimeDelta end) OVERRIDE {}
-
- // DemuxerHost implementation.
- virtual void SetDuration(base::TimeDelta duration) OVERRIDE {}
- virtual void OnDemuxerError(media::PipelineStatus error) OVERRIDE {}
-};
-
-void QuitMessageLoop(base::MessageLoop* loop, media::PipelineStatus status) {
- CHECK_EQ(status, media::PIPELINE_OK);
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-void TimestampExtractor(uint64* timestamp_ms,
- base::MessageLoop* loop,
- media::DemuxerStream::Status status,
- const scoped_refptr<media::DecoderBuffer>& buffer) {
- CHECK_EQ(status, media::DemuxerStream::kOk);
- if (buffer->timestamp() == media::kNoTimestamp())
- *timestamp_ms = -1;
- else
- *timestamp_ms = buffer->timestamp().InMillisecondsF();
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-static void NeedKey(const std::string& type, scoped_ptr<uint8[]> init_data,
- int init_data_size) {
- LOG(INFO) << "File is encrypted.";
-}
-
-int main(int argc, char** argv) {
- base::AtExitManager at_exit;
- media::InitializeMediaLibraryForTesting();
-
- CHECK_EQ(argc, 3) << "\nUsage: " << argv[0] << " <file> <seekTimeInMs>";
- uint64 seek_target_ms;
- CHECK(base::StringToUint64(argv[2], &seek_target_ms));
- scoped_ptr<media::FileDataSource> file_data_source(
- new media::FileDataSource());
- CHECK(file_data_source->Initialize(base::FilePath::FromUTF8Unsafe(argv[1])));
-
- DemuxerHostImpl host;
- base::MessageLoop loop;
- media::PipelineStatusCB quitter = base::Bind(&QuitMessageLoop, &loop);
- media::FFmpegNeedKeyCB need_key_cb = base::Bind(&NeedKey);
- scoped_ptr<media::FFmpegDemuxer> demuxer(
- new media::FFmpegDemuxer(loop.message_loop_proxy(),
- file_data_source.get(),
- need_key_cb,
- new media::MediaLog()));
- demuxer->Initialize(&host, quitter);
- loop.Run();
-
- demuxer->Seek(base::TimeDelta::FromMilliseconds(seek_target_ms), quitter);
- loop.Run();
-
- uint64 audio_seeked_to_ms;
- uint64 video_seeked_to_ms;
- media::DemuxerStream* audio_stream =
- demuxer->GetStream(media::DemuxerStream::AUDIO);
- media::DemuxerStream* video_stream =
- demuxer->GetStream(media::DemuxerStream::VIDEO);
- LOG(INFO) << "Requested: " << seek_target_ms << "ms";
- if (audio_stream) {
- audio_stream->Read(base::Bind(
- &TimestampExtractor, &audio_seeked_to_ms, &loop));
- loop.Run();
- LOG(INFO) << " audio seeked to: " << audio_seeked_to_ms << "ms";
- }
- if (video_stream) {
- video_stream->Read(
- base::Bind(&TimestampExtractor, &video_seeked_to_ms, &loop));
- loop.Run();
- LOG(INFO) << " video seeked to: " << video_seeked_to_ms << "ms";
- }
-
- demuxer->Stop(base::Bind(&base::MessageLoop::Quit, base::Unretained(&loop)));
- loop.Run();
-
- return 0;
-}
diff --git a/chromium/media/tools/shader_bench/cpu_color_painter.cc b/chromium/media/tools/shader_bench/cpu_color_painter.cc
deleted file mode 100644
index a7cb570cdf9..00000000000
--- a/chromium/media/tools/shader_bench/cpu_color_painter.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/yuv_convert.h"
-#include "media/tools/shader_bench/cpu_color_painter.h"
-
-enum { kNumRGBPlanes = 1 };
-
-// Pass-through vertex shader.
-static const char kVertexShader[] =
- "precision highp float;\n"
- "precision highp int;\n"
- "varying vec2 interp_tc;\n"
- "\n"
- "attribute vec4 in_pos;\n"
- "attribute vec2 in_tc;\n"
- "\n"
- "void main() {\n"
- " interp_tc = in_tc;\n"
- " gl_Position = in_pos;\n"
- "}\n";
-
-// RGB pixel shader.
-static const char kFragmentShader[] =
- "precision mediump float;\n"
- "precision mediump int;\n"
- "varying vec2 interp_tc;\n"
- "\n"
- "uniform sampler2D rgba_tex;\n"
- "\n"
- "void main() {\n"
- " vec4 texColor = texture2D(rgba_tex, interp_tc);"
- " gl_FragColor = vec4(texColor.z, texColor.y, texColor.x, texColor.w);\n"
- "}\n";
-
-CPUColorPainter::CPUColorPainter()
- : program_id_(-1) {
-}
-
-CPUColorPainter::~CPUColorPainter() {
- if (program_id_) {
- glDeleteProgram(program_id_);
- glDeleteTextures(kNumRGBPlanes, textures_);
- }
-}
-
-void CPUColorPainter::Initialize(int width, int height) {
- glGenTextures(kNumRGBPlanes, textures_);
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_2D, textures_[0]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
- GL_RGBA, GL_UNSIGNED_BYTE, 0);
-
- GLuint program = CreateShaderProgram(kVertexShader, kFragmentShader);
-
- // Bind parameters.
- glUniform1i(glGetUniformLocation(program, "rgba_tex"), 0);
- program_id_ = program;
-}
-
-void CPUColorPainter::Paint(scoped_refptr<media::VideoFrame> video_frame) {
- // Convert to RGB32 frame.
- scoped_refptr<media::VideoFrame> rgba_frame =
- media::VideoFrame::CreateFrame(media::VideoFrame::RGB32,
- video_frame->coded_size(),
- video_frame->visible_rect(),
- video_frame->natural_size(),
- base::TimeDelta());
-
- media::ConvertYUVToRGB32(video_frame->data(media::VideoFrame::kYPlane),
- video_frame->data(media::VideoFrame::kUPlane),
- video_frame->data(media::VideoFrame::kVPlane),
- rgba_frame->data(0),
- video_frame->coded_size().width(),
- video_frame->coded_size().height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- rgba_frame->stride(0),
- media::YV12);
-
- glBindTexture(GL_TEXTURE_2D, textures_[0]);
- // Not accounting for x/y offset presently.
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
- rgba_frame->visible_rect().width(),
- rgba_frame->visible_rect().height(),
- GL_RGBA, GL_UNSIGNED_BYTE,
- rgba_frame->data(0));
-
- glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
- surface()->SwapBuffers();
-}
diff --git a/chromium/media/tools/shader_bench/cpu_color_painter.h b/chromium/media/tools/shader_bench/cpu_color_painter.h
deleted file mode 100644
index 7aba3cda861..00000000000
--- a/chromium/media/tools/shader_bench/cpu_color_painter.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_SHADER_BENCH_CPU_COLOR_PAINTER_H_
-#define MEDIA_TOOLS_SHADER_BENCH_CPU_COLOR_PAINTER_H_
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/video_frame.h"
-#include "media/tools/shader_bench/gpu_painter.h"
-
-// Does color conversion using CPU, rendering on GPU.
-class CPUColorPainter : public GPUPainter {
- public:
- CPUColorPainter();
- virtual ~CPUColorPainter();
-
- // Painter interface.
- virtual void Initialize(int width, int height) OVERRIDE;
- virtual void Paint(scoped_refptr<media::VideoFrame> video_frame) OVERRIDE;
-
- private:
- // Shader program id.
- GLuint program_id_;
-
- // ID of rgba texture.
- GLuint textures_[1];
-
- DISALLOW_COPY_AND_ASSIGN(CPUColorPainter);
-};
-
-#endif // MEDIA_TOOLS_SHADER_BENCH_CPU_COLOR_PAINTER_H_
diff --git a/chromium/media/tools/shader_bench/gpu_color_painter.cc b/chromium/media/tools/shader_bench/gpu_color_painter.cc
deleted file mode 100644
index 17155ee009d..00000000000
--- a/chromium/media/tools/shader_bench/gpu_color_painter.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/shader_bench/gpu_color_painter.h"
-#include "ui/gl/gl_context.h"
-
-enum { kNumYUVPlanes = 3 };
-
-// Matrix used for the YUV to RGB conversion.
-static const float kYUV2RGB[9] = {
- 1.f, 0.f, 1.403f,
- 1.f, -.344f, -.714f,
- 1.f, 1.772f, 0.f,
-};
-
-static const float kYUV2RGB_TRANS[9] = {
- 1.f, 1.f, 1.f,
- 0.f, -.344f, 1.772f,
- 1.403f, -.714f, 0.f,
-};
-
-// Pass-through vertex shader.
-static const char kVertexShader[] =
- "precision highp float;\n"
- "precision highp int;\n"
- "varying vec2 interp_tc;\n"
- "\n"
- "attribute vec4 in_pos;\n"
- "attribute vec2 in_tc;\n"
- "\n"
- "void main() {\n"
- " interp_tc = in_tc;\n"
- " gl_Position = in_pos;\n"
- "}\n";
-
-// YUV to RGB pixel shader. Loads a pixel from each plane and pass through the
-// matrix.
-static const char kFragmentShader[] =
- "precision mediump float;\n"
- "precision mediump int;\n"
- "varying vec2 interp_tc;\n"
- "\n"
- "uniform sampler2D y_tex;\n"
- "uniform sampler2D u_tex;\n"
- "uniform sampler2D v_tex;\n"
- "uniform mat3 yuv2rgb;\n"
- "\n"
- "void main() {\n"
- " float y = texture2D(y_tex, interp_tc).x;\n"
- " float u = texture2D(u_tex, interp_tc).r - .5;\n"
- " float v = texture2D(v_tex, interp_tc).r - .5;\n"
- " vec3 rgb = yuv2rgb * vec3(y, u, v);\n"
- " gl_FragColor = vec4(rgb, 1);\n"
- "}\n";
-
-GPUColorWithLuminancePainter::GPUColorWithLuminancePainter()
- : program_id_(-1) {
-}
-
-GPUColorWithLuminancePainter::~GPUColorWithLuminancePainter() {
- if (program_id_) {
- glDeleteProgram(program_id_);
- glDeleteTextures(kNumYUVPlanes, textures_);
- }
-}
-
-void GPUColorWithLuminancePainter::Initialize(int width, int height) {
- // Create 3 textures, one for each plane, and bind them to different
- // texture units.
- glGenTextures(kNumYUVPlanes, textures_);
-
- for (unsigned int i = 0; i < kNumYUVPlanes; ++i) {
- unsigned int texture_width = (i == media::VideoFrame::kYPlane) ?
- width : width / 2;
- unsigned int texture_height = (i == media::VideoFrame::kYPlane) ?
- height : height / 2;
- glActiveTexture(GL_TEXTURE0 + i);
- glBindTexture(GL_TEXTURE_2D, textures_[i]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, texture_width, texture_height,
- 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0);
- }
-
- GLuint program = CreateShaderProgram(kVertexShader, kFragmentShader);
-
- // Bind parameters.
- glUniform1i(glGetUniformLocation(program, "y_tex"), 0);
- glUniform1i(glGetUniformLocation(program, "u_tex"), 1);
- glUniform1i(glGetUniformLocation(program, "v_tex"), 2);
- int yuv2rgb_location = glGetUniformLocation(program, "yuv2rgb");
-
- // DesktopGL supports transpose matrices.
- if (gfx::GetGLImplementation() == gfx::kGLImplementationDesktopGL)
- glUniformMatrix3fv(yuv2rgb_location, 1, GL_TRUE, kYUV2RGB);
- else
- glUniformMatrix3fv(yuv2rgb_location, 1, GL_FALSE, kYUV2RGB_TRANS);
-
- program_id_ = program;
-}
-
-void GPUColorWithLuminancePainter::Paint(
- scoped_refptr<media::VideoFrame> video_frame) {
- // Not accounting for x/y offset presently.
- int width = video_frame->visible_rect().width();
- int height = video_frame->visible_rect().height();
- for (unsigned int i = 0; i < kNumYUVPlanes; ++i) {
- unsigned int plane_width =
- (i == media::VideoFrame::kYPlane) ? width : width / 2;
- unsigned int plane_height =
- (i == media::VideoFrame::kYPlane) ? height : height / 2;
- glBindTexture(GL_TEXTURE_2D, textures_[i]);
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, plane_width, plane_height,
- GL_LUMINANCE, GL_UNSIGNED_BYTE, video_frame->data(i));
- }
-
- glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
- surface()->SwapBuffers();
-}
diff --git a/chromium/media/tools/shader_bench/gpu_color_painter.h b/chromium/media/tools/shader_bench/gpu_color_painter.h
deleted file mode 100644
index 63c6f52a288..00000000000
--- a/chromium/media/tools/shader_bench/gpu_color_painter.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_SHADER_BENCH_GPU_COLOR_PAINTER_H_
-#define MEDIA_TOOLS_SHADER_BENCH_GPU_COLOR_PAINTER_H_
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/video_frame.h"
-#include "media/tools/shader_bench/gpu_painter.h"
-#include "ui/gl/gl_context.h"
-
-// Does color space conversion using luminance textures on GPU,
-// renders using GPU.
-class GPUColorWithLuminancePainter : public GPUPainter {
- public:
- GPUColorWithLuminancePainter();
- virtual ~GPUColorWithLuminancePainter();
-
- // Painter interface.
- virtual void Initialize(int width, int height) OVERRIDE;
- virtual void Paint(scoped_refptr<media::VideoFrame> video_frame) OVERRIDE;
-
- private:
- // Shader program id.
- GLuint program_id_;
-
- // IDs of 3 luminance textures.
- GLuint textures_[3];
-
- DISALLOW_COPY_AND_ASSIGN(GPUColorWithLuminancePainter);
-};
-
-#endif // MEDIA_TOOLS_SHADER_BENCH_GPU_COLOR_PAINTER_H_
diff --git a/chromium/media/tools/shader_bench/gpu_painter.cc b/chromium/media/tools/shader_bench/gpu_painter.cc
deleted file mode 100644
index e6350119307..00000000000
--- a/chromium/media/tools/shader_bench/gpu_painter.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "media/tools/shader_bench/gpu_painter.h"
-
-// Vertices for a full screen quad.
-static const float kVertices[8] = {
- -1.f, 1.f,
- -1.f, -1.f,
- 1.f, 1.f,
- 1.f, -1.f,
-};
-
-// Texture Coordinates mapping the entire texture.
-static const float kTextureCoords[8] = {
- 0, 0,
- 0, 1,
- 1, 0,
- 1, 1,
-};
-
-// Buffer size for compile errors.
-static const unsigned int kErrorSize = 4096;
-
-GPUPainter::GPUPainter()
- : surface_(NULL),
- context_(NULL) {
-}
-
-GPUPainter::~GPUPainter() {
-}
-
-void GPUPainter::SetGLContext(gfx::GLSurface* surface,
- gfx::GLContext* context) {
- surface_ = surface;
- context_ = context;
-}
-
-GLuint GPUPainter::LoadShader(unsigned type, const char* shader_source) {
- GLuint shader = glCreateShader(type);
- glShaderSource(shader, 1, &shader_source, NULL);
- glCompileShader(shader);
- int result = GL_FALSE;
- glGetShaderiv(shader, GL_COMPILE_STATUS, &result);
- if (!result) {
- char log[kErrorSize];
- int len;
- glGetShaderInfoLog(shader, kErrorSize - 1, &len, log);
- log[kErrorSize - 1] = 0;
- LOG(FATAL) << "Shader did not compile: " << log;
- }
- return shader;
-}
-
-GLuint GPUPainter::CreateShaderProgram(const char* vertex_shader_source,
- const char* fragment_shader_source) {
-
- // Create vertex and pixel shaders.
- GLuint vertex_shader = LoadShader(GL_VERTEX_SHADER, vertex_shader_source);
- GLuint fragment_shader =
- LoadShader(GL_FRAGMENT_SHADER, fragment_shader_source);
-
- // Create program and attach shaders.
- GLuint program = glCreateProgram();
- glAttachShader(program, vertex_shader);
- glAttachShader(program, fragment_shader);
- glDeleteShader(vertex_shader);
- glDeleteShader(fragment_shader);
- glLinkProgram(program);
- int result = GL_FALSE;
- glGetProgramiv(program, GL_LINK_STATUS, &result);
- if (!result) {
- char log[kErrorSize];
- int len;
- glGetProgramInfoLog(program, kErrorSize - 1, &len, log);
- log[kErrorSize - 1] = 0;
- LOG(FATAL) << "Program did not link: " << log;
- }
- glUseProgram(program);
-
- // Set common vertex parameters.
- int pos_location = glGetAttribLocation(program, "in_pos");
- glEnableVertexAttribArray(pos_location);
- glVertexAttribPointer(pos_location, 2, GL_FLOAT, GL_FALSE, 0, kVertices);
-
- int tc_location = glGetAttribLocation(program, "in_tc");
- glEnableVertexAttribArray(tc_location);
- glVertexAttribPointer(tc_location, 2, GL_FLOAT, GL_FALSE, 0, kTextureCoords);
- return program;
-}
diff --git a/chromium/media/tools/shader_bench/gpu_painter.h b/chromium/media/tools/shader_bench/gpu_painter.h
deleted file mode 100644
index e68305bd000..00000000000
--- a/chromium/media/tools/shader_bench/gpu_painter.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_SHADER_BENCH_GPU_PAINTER_H_
-#define MEDIA_TOOLS_SHADER_BENCH_GPU_PAINTER_H_
-
-#include "media/tools/shader_bench/painter.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_surface.h"
-
-// Class that renders video frames to a window via GPU.
-class GPUPainter : public Painter {
- public:
- GPUPainter();
- virtual ~GPUPainter();
-
- // Returns a reference to the GL context.
- gfx::GLSurface* surface() const { return surface_; }
-
- // Sets context for subsequent gl calls in this painter.
- virtual void SetGLContext(gfx::GLSurface* surface, gfx::GLContext* context);
-
- // Creates shader program into given context, from the vertex and fragment
- // shader source code. Returns the id of the shader program.
- virtual GLuint CreateShaderProgram(const char* vertex_shader_source,
- const char* fragment_shader_source);
-
- private:
- // Loads shader into given context, from the source code of the
- // shader. type refers to the shader type, either GL_VERTEX_SHADER or
- // GL_FRAGMENT_SHADER. Returns id of shader.
- GLuint LoadShader(unsigned type, const char* shader_source);
-
- // Reference to the gl context.
- gfx::GLSurface* surface_;
- gfx::GLContext* context_;
-
- DISALLOW_COPY_AND_ASSIGN(GPUPainter);
-};
-
-#endif // MEDIA_TOOLS_SHADER_BENCH_GPU_PAINTER_H_
diff --git a/chromium/media/tools/shader_bench/painter.cc b/chromium/media/tools/shader_bench/painter.cc
deleted file mode 100644
index ab8fc593473..00000000000
--- a/chromium/media/tools/shader_bench/painter.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/shader_bench/painter.h"
-
-Painter::Painter()
- : frames_(NULL) {
-}
-
-Painter::~Painter() {
-}
-
-void Painter::OnPaint() {
- if (frames_ && !frames_->empty()) {
- scoped_refptr<media::VideoFrame> cur_frame = frames_->front();
- Paint(cur_frame);
- frames_->pop_front();
- frames_->push_back(cur_frame);
- }
-}
-
-void Painter::LoadFrames(
- std::deque<scoped_refptr<media::VideoFrame> >* frames) {
- frames_ = frames;
-}
diff --git a/chromium/media/tools/shader_bench/painter.h b/chromium/media/tools/shader_bench/painter.h
deleted file mode 100644
index 2dd92ab1248..00000000000
--- a/chromium/media/tools/shader_bench/painter.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_SHADER_BENCH_PAINTER_H_
-#define MEDIA_TOOLS_SHADER_BENCH_PAINTER_H_
-
-#include <deque>
-
-#include "base/memory/ref_counted.h"
-#include "media/base/video_frame.h"
-
-// Class that paints video frames to a window.
-class Painter {
- public:
- Painter();
- virtual ~Painter();
-
- // Loads frames into Painter. Painter does not take ownership of frames.
- virtual void LoadFrames(
- std::deque<scoped_refptr<media::VideoFrame> >* frames);
-
- // Called window is ready to be painted.
- virtual void OnPaint();
-
- // Initialize a Painter class with a width and a height
- virtual void Initialize(int width, int height) = 0;
-
- // Paint a single frame to a window.
- virtual void Paint(scoped_refptr<media::VideoFrame> video_frame) = 0;
-
- private:
- // Frames that the Painter will paint.
- std::deque<scoped_refptr<media::VideoFrame> >* frames_;
-
- DISALLOW_COPY_AND_ASSIGN(Painter);
-};
-
-#endif // MEDIA_TOOLS_SHADER_BENCH_PAINTER_H_
diff --git a/chromium/media/tools/shader_bench/shader_bench.cc b/chromium/media/tools/shader_bench/shader_bench.cc
deleted file mode 100644
index b26733ce032..00000000000
--- a/chromium/media/tools/shader_bench/shader_bench.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <deque>
-#include <ostream>
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/time/time.h"
-#include "media/base/video_frame.h"
-#include "media/tools/shader_bench/cpu_color_painter.h"
-#include "media/tools/shader_bench/gpu_color_painter.h"
-#include "media/tools/shader_bench/painter.h"
-#include "media/tools/shader_bench/window.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_surface.h"
-
-#if defined(TOOLKIT_GTK)
-#include <gtk/gtk.h>
-#endif
-
-static const int kNumFramesToPaint = 500;
-static base::TimeTicks g_start_;
-static base::TimeTicks g_end_;
-
-long CalculateYUVFrameSize(FILE* file_handle, int num_frames) {
- fseek(file_handle, 0, SEEK_END);
- long file_size = (long) ftell(file_handle);
- rewind(file_handle);
- return file_size / num_frames;
-}
-
-void GetFrames(std::string file_name,
- int width, int height, int num_frames,
- std::deque<scoped_refptr<media::VideoFrame> >& out_frames) {
- FILE* file_handle = fopen(file_name.c_str(), "rb");
- if (!file_handle) {
- printf("Could not open %s\n", file_name.c_str());
- exit(1);
- }
-
- long frame_size = CalculateYUVFrameSize(file_handle, num_frames);
-
- gfx::Size size(width, height);
- for (int i = 0; i < num_frames; i++) {
- scoped_refptr<media::VideoFrame> video_frame =
- media::VideoFrame::CreateFrame(media::VideoFrame::YV12, size,
- gfx::Rect(size), size,
- base::TimeDelta());
- long bytes_read =
- fread(video_frame->data(0), 1, frame_size, file_handle);
-
- if (bytes_read != frame_size) {
- printf("Could not read %s\n", file_name.c_str());
- fclose(file_handle);
- exit(1);
- }
- out_frames.push_back(video_frame);
- }
-
- fclose(file_handle);
-}
-
-void TestFinished() {
- g_end_ = base::TimeTicks::HighResNow();
- double time_in_seconds =
- static_cast<double>((g_end_ - g_start_).InMilliseconds()) / 1000;
- double fps = kNumFramesToPaint / time_in_seconds;
- printf("Printed %f frames per second.\n", fps);
-}
-
-void RunTest(media::Window* window, Painter* painter) {
- g_start_ = base::TimeTicks::HighResNow();
- window->Start(kNumFramesToPaint, base::Bind(&TestFinished), painter);
-}
-
-int main(int argc, char** argv) {
- // Read arguments.
- if (argc == 1) {
- printf("Usage: %s --file=FILE --wxh=DIMENSIONS --frames=NUM_FRAMES\n"
- "FILE is a raw .yuv file with 1+ frames in it\n"
- "DIMENSIONS is the width and height of the frame in pixels\n"
- "NUM_FRAMES is the number of frames in FILE\n", argv[0]);
- return 1;
- }
-
- // Read command line.
-#if defined(TOOLKIT_GTK)
- gtk_init(&argc, &argv);
-#endif
- CommandLine::Init(argc, argv);
-
- // Determine file name.
- std::string file_name =
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII("file");
-
- // Determine number of frames.
- int num_frames = 0;
- std::string str_num_frames =
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII("frames");
- base::StringToInt(str_num_frames, &num_frames);
-
- // Determine video dimensions.
- int width = 0;
- int height = 0;
- std::string dimensions =
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII("wxh");
- int x_index = dimensions.find('x');
- std::string str_width = dimensions.substr(0, x_index);
- std::string str_height =
- dimensions.substr(x_index + 1, dimensions.length() - x_index - 1);
- base::StringToInt(str_width, &width);
- base::StringToInt(str_height, &height);
-
- // Process files.
- std::deque<scoped_refptr<media::VideoFrame> > frames;
- GetFrames(file_name, width, height, num_frames, frames);
-
- // Initialize window and graphics context.
- base::AtExitManager at_exit_manager;
- gfx::GLSurface::InitializeOneOff();
- scoped_ptr<media::Window> window(new media::Window(width, height));
- gfx::GLSurface* surface =
- gfx::GLSurface::CreateViewGLSurface(window->PluginWindow()).get();
- gfx::GLContext* context = gfx::GLContext::CreateGLContext(
- NULL, surface, gfx::PreferDiscreteGpu).get();
- context->MakeCurrent(surface);
- // This sets D3DPRESENT_INTERVAL_IMMEDIATE on Windows.
- context->SetSwapInterval(0);
-
- // Initialize and name GPU painters.
- static const struct {
- const char* name;
- GPUPainter* painter;
- } painters[] = {
- { "CPU CSC + GPU Render", new CPUColorPainter() },
- { "GPU CSC/Render", new GPUColorWithLuminancePainter() },
- };
-
- // Run GPU painter tests.
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(painters); i++) {
- scoped_ptr<GPUPainter> painter(painters[i].painter);
- painter->LoadFrames(&frames);
- painter->SetGLContext(surface, context);
- painter->Initialize(width, height);
- printf("Running %s tests...", painters[i].name);
- RunTest(window.get(), painter.get());
- }
-
- return 0;
-}
diff --git a/chromium/media/tools/shader_bench/window.cc b/chromium/media/tools/shader_bench/window.cc
deleted file mode 100644
index 3eb26f4d2db..00000000000
--- a/chromium/media/tools/shader_bench/window.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/shader_bench/window.h"
-
-namespace media {
-
-Window::Window(int width, int height)
- : painter_(NULL),
- limit_(0),
- count_(0),
- running_(false) {
- window_handle_ = CreateNativeWindow(width, height);
-}
-
-Window::~Window() {}
-
-} // namespace media
diff --git a/chromium/media/tools/shader_bench/window.h b/chromium/media/tools/shader_bench/window.h
deleted file mode 100644
index d66e849cd3c..00000000000
--- a/chromium/media/tools/shader_bench/window.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_TOOLS_SHADER_BENCH_WINDOW_H_
-#define MEDIA_TOOLS_SHADER_BENCH_WINDOW_H_
-
-#include "base/callback.h"
-#include "ui/gfx/native_widget_types.h"
-
-class Painter;
-
-namespace media {
-
-class Window {
- public:
- Window(int width, int height);
- ~Window();
-
- // Creates and returns a handle to a native window of the given dimensions.
- gfx::NativeWindow CreateNativeWindow(int width, int height);
-
- // Returns the NPAPI plugin window handle of the window.
- gfx::PluginWindowHandle PluginWindow();
-
- // Kicks off frame painting with the given limit, painter, and
- // callback to run when painting task is complete.
- void Start(int limit, const base::Closure& callback, Painter* painter);
-
- // Called when window is expected to paint self.
- void OnPaint();
-
- // Main loop for window.
- void MainLoop();
-
- private:
- // Closure to run when frame painting is completed. Will be reset after
- // running.
- base::Closure callback_;
-
- // Reference to painter Window uses to paint frames.
- Painter* painter_;
-
- // Number of frames to paint before closing the window.
- int limit_;
-
- // Number of frames currently painted.
- int count_;
-
- // True if the window is painting video frames to the screen, false otherwise.
- bool running_;
-
- // This window's native handle.
- gfx::NativeWindow window_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(Window);
-};
-
-} // namespace media
-
-#endif // MEDIA_TOOLS_SHADER_BENCH_WINDOW_H_
diff --git a/chromium/media/tools/shader_bench/window_linux.cc b/chromium/media/tools/shader_bench/window_linux.cc
deleted file mode 100644
index a0a34932c50..00000000000
--- a/chromium/media/tools/shader_bench/window_linux.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/shader_bench/window.h"
-
-#include "media/tools/shader_bench/painter.h"
-
-#include <gdk/gdkx.h>
-#include <gtk/gtk.h>
-
-namespace media {
-
-static gboolean OnDelete(GtkWidget* widget, GdkEventExpose* event) {
- gtk_main_quit();
- return FALSE;
-}
-
-static gboolean OnExpose(GtkWidget* widget,
- GdkEventExpose* event,
- gpointer data) {
- Window* window = reinterpret_cast<Window*>(data);
- if (window)
- window->OnPaint();
- return FALSE;
-}
-
-gfx::NativeWindow Window::CreateNativeWindow(int width, int height) {
- GtkWidget* hwnd = gtk_window_new(GTK_WINDOW_TOPLEVEL);
-
- gtk_window_set_default_size(GTK_WINDOW(hwnd), width, height);
- gtk_widget_set_double_buffered(hwnd, FALSE);
- gtk_widget_set_app_paintable(hwnd, TRUE);
- gtk_widget_show(hwnd);
-
- return GTK_WINDOW(hwnd);
-}
-
-gfx::PluginWindowHandle Window::PluginWindow() {
- return GDK_WINDOW_XWINDOW(GTK_WIDGET(window_handle_)->window);
-}
-
-void Window::Start(int limit, const base::Closure& callback,
- Painter* painter) {
- running_ = true;
- count_ = 0;
- limit_ = limit;
- callback_ = callback;
- painter_ = painter;
-
- gtk_signal_connect(GTK_OBJECT(window_handle_),
- "delete_event",
- reinterpret_cast<GtkSignalFunc>(OnDelete),
- NULL);
-
- gtk_signal_connect(GTK_OBJECT(window_handle_),
- "expose_event",
- reinterpret_cast<GtkSignalFunc>(OnExpose),
- this);
-
- gtk_widget_queue_draw(GTK_WIDGET(window_handle_));
- MainLoop();
-}
-
-void Window::OnPaint() {
- if (!running_)
- return;
-
- if (count_ < limit_) {
- painter_->OnPaint();
- count_++;
- gtk_widget_queue_draw(GTK_WIDGET(window_handle_));
- } else {
- running_ = false;
- if (!callback_.is_null()) {
- callback_.Run();
- callback_.Reset();
- }
- gtk_main_quit();
- }
-}
-
-void Window::MainLoop() {
- gtk_main();
-}
-
-} // namespace media
diff --git a/chromium/media/tools/shader_bench/window_win.cc b/chromium/media/tools/shader_bench/window_win.cc
deleted file mode 100644
index abc6fc4e841..00000000000
--- a/chromium/media/tools/shader_bench/window_win.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/tools/shader_bench/window.h"
-
-#include "media/tools/shader_bench/painter.h"
-
-namespace media {
-
-static LRESULT CALLBACK WindowProc(HWND hwnd, UINT msg,
- WPARAM w_param, LPARAM l_param) {
- LRESULT result = 0;
- switch (msg) {
- case WM_CLOSE:
- ::DestroyWindow(hwnd);
- break;
- case WM_DESTROY:
- ::PostQuitMessage(0);
- break;
- case WM_ERASEBKGND:
- // Return a non-zero value to indicate that the background has been
- // erased.
- result = 1;
- break;
- case WM_PAINT: {
- Window* window =
- reinterpret_cast<Window*>(GetWindowLongPtr(hwnd, GWLP_USERDATA));
- if (window != NULL)
- window->OnPaint();
- ::ValidateRect(hwnd, NULL);
- break;
- }
- default:
- result = ::DefWindowProc(hwnd, msg, w_param, l_param);
- break;
- }
- return result;
-}
-
-gfx::NativeWindow Window::CreateNativeWindow(int width, int height) {
- WNDCLASS wnd_class = {0};
- HINSTANCE instance = GetModuleHandle(NULL);
- wnd_class.style = CS_OWNDC;
- wnd_class.lpfnWndProc = WindowProc;
- wnd_class.hInstance = instance;
- wnd_class.hbrBackground =
- reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH));
- wnd_class.lpszClassName = L"gpu_demo";
- if (!RegisterClass(&wnd_class))
- return NULL;
-
- DWORD wnd_style = WS_OVERLAPPED | WS_SYSMENU;
- RECT wnd_rect;
- wnd_rect.left = 0;
- wnd_rect.top = 0;
- wnd_rect.right = width;
- wnd_rect.bottom = height;
- AdjustWindowRect(&wnd_rect, wnd_style, FALSE);
-
- HWND hwnd = CreateWindow(
- wnd_class.lpszClassName,
- L"",
- wnd_style,
- 0,
- 0,
- wnd_rect.right - wnd_rect.left,
- wnd_rect.bottom - wnd_rect.top,
- NULL,
- NULL,
- instance,
- NULL);
- if (hwnd == NULL)
- return NULL;
-
- return hwnd;
-}
-
-gfx::PluginWindowHandle Window::PluginWindow() {
- return window_handle_;
-}
-
-void Window::Start(int limit, const base::Closure& callback,
- Painter* painter) {
- running_ = true;
- count_ = 0;
- limit_ = limit;
- callback_ = callback;
- painter_ = painter;
-
- SetWindowLongPtr(window_handle_, GWLP_USERDATA,
- reinterpret_cast<LONG_PTR>(this));
-
- ShowWindow(window_handle_, SW_SHOWNORMAL);
-
- // Post first invalidate call to kick off painting.
- ::InvalidateRect(window_handle_, NULL, FALSE);
-
- MainLoop();
-}
-
-void Window::OnPaint() {
- if (!running_)
- return;
-
- if (count_ < limit_) {
- painter_->OnPaint();
- count_++;
- } else {
- running_ = false;
- if (!callback_.is_null()) {
- ShowWindow(window_handle_, SW_HIDE);
- callback_.Run();
- callback_.Reset();
- }
- }
-}
-
-void Window::MainLoop() {
- MSG msg;
- bool done = false;
- while (!done) {
- while (::PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
- if (msg.message == WM_QUIT || !running_)
- done = true;
- ::TranslateMessage(&msg);
- ::DispatchMessage(&msg);
- if (!done)
- ::InvalidateRect(window_handle_, NULL, FALSE);
- }
- }
-}
-
-} // namespace media
diff --git a/chromium/media/video/capture/android/imageformat_list.h b/chromium/media/video/capture/android/imageformat_list.h
new file mode 100644
index 00000000000..57c5ba114f7
--- /dev/null
+++ b/chromium/media/video/capture/android/imageformat_list.h
@@ -0,0 +1,22 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file intentionally does not have header guards, it's included
+// inside a macro to generate enum and a java class for the values.
+
+#ifndef DEFINE_ANDROID_IMAGEFORMAT
+#error "DEFINE_ANDROID_IMAGEFORMAT should be defined."
+#endif
+
+// Android graphics ImageFormat mapping, see reference in:
+// http://developer.android.com/reference/android/graphics/ImageFormat.html
+
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_JPEG, 256)
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_NV16, 16)
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_NV21, 17)
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_RGB_565, 4)
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_YUY2, 20)
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_YV12, 842094169)
+
+DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_UNKNOWN, 0)
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/video/capture/android/video_capture_device_android.cc
index d4d73507798..141a5d0fae2 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_android.cc
@@ -21,19 +21,6 @@ using base::android::MethodID;
using base::android::JavaRef;
using base::android::ScopedJavaLocalRef;
-namespace {
-
-int GetIntField(JNIEnv* env,
- const JavaRef<jclass>& clazz,
- const JavaRef<jobject>& instance,
- const char* field_name) {
- jfieldID field = GetFieldID(env, clazz, field_name, "I");
- jint int_value = env->GetIntField(instance.obj(), field);
- return int_value;
-}
-
-} // namespace
-
namespace media {
// static
@@ -94,6 +81,7 @@ bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
: state_(kIdle),
+ got_first_frame_(false),
observer_(NULL),
device_name_(device_name),
current_settings_() {
@@ -151,10 +139,17 @@ void VideoCaptureDeviceAndroid::Allocate(
Java_VideoCapture_queryHeight(env, j_capture_.obj());
current_settings_.frame_rate =
Java_VideoCapture_queryFrameRate(env, j_capture_.obj());
- current_settings_.color = VideoCaptureCapability::kYV12;
+ current_settings_.color = GetColorspace();
+ DCHECK_NE(current_settings_.color, media::PIXEL_FORMAT_UNKNOWN);
CHECK(current_settings_.width > 0 && !(current_settings_.width % 2));
CHECK(current_settings_.height > 0 && !(current_settings_.height % 2));
+ if (capture_format.frame_rate > 0) {
+ frame_interval_ = base::TimeDelta::FromMicroseconds(
+ (base::Time::kMicrosecondsPerSecond + capture_format.frame_rate - 1) /
+ capture_format.frame_rate);
+ }
+
DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried width="
<< current_settings_.width
<< ", height="
@@ -169,6 +164,7 @@ void VideoCaptureDeviceAndroid::Start() {
DVLOG(1) << "VideoCaptureDeviceAndroid::Start";
{
base::AutoLock lock(lock_);
+ got_first_frame_ = false;
DCHECK_EQ(state_, kAllocated);
}
@@ -249,13 +245,46 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
return;
}
- observer_->OnIncomingCapturedFrame(
- reinterpret_cast<uint8*>(buffer), length, base::Time::Now(),
- rotation, flip_vert, flip_horiz);
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ if (!got_first_frame_) {
+ // Set aside one frame allowance for fluctuation.
+ expected_next_frame_time_ = current_time - frame_interval_;
+ got_first_frame_ = true;
+ }
+
+ // Deliver the frame when it doesn't arrive too early.
+ if (expected_next_frame_time_ <= current_time) {
+ expected_next_frame_time_ += frame_interval_;
+
+ observer_->OnIncomingCapturedFrame(
+ reinterpret_cast<uint8*>(buffer), length, base::Time::Now(),
+ rotation, flip_vert, flip_horiz);
+ }
env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
}
+VideoPixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
+ JNIEnv* env = AttachCurrentThread();
+ int current_capture_colorspace =
+ Java_VideoCapture_getColorspace(env, j_capture_.obj());
+ switch (current_capture_colorspace){
+ case ANDROID_IMAGEFORMAT_YV12:
+ return media::PIXEL_FORMAT_YV12;
+ case ANDROID_IMAGEFORMAT_NV21:
+ return media::PIXEL_FORMAT_NV21;
+ case ANDROID_IMAGEFORMAT_YUY2:
+ return media::PIXEL_FORMAT_YUY2;
+ case ANDROID_IMAGEFORMAT_NV16:
+ case ANDROID_IMAGEFORMAT_JPEG:
+ case ANDROID_IMAGEFORMAT_RGB_565:
+ case ANDROID_IMAGEFORMAT_UNKNOWN:
+ // NOTE(mcasas): NV16, JPEG, RGB565 not supported in VideoPixelFormat.
+ default:
+ return media::PIXEL_FORMAT_UNKNOWN;
+ }
+}
+
void VideoCaptureDeviceAndroid::SetErrorState(const std::string& reason) {
LOG(ERROR) << "VideoCaptureDeviceAndroid::SetErrorState: " << reason;
{
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/video/capture/android/video_capture_device_android.h
index 29a5fc7cf5d..de6955d9e8f 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_android.h
@@ -11,6 +11,7 @@
#include "base/android/scoped_java_ref.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
+#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/video/capture/video_capture_device.h"
@@ -20,7 +21,7 @@ namespace media {
// by VideoCaptureManager on its own thread, while OnFrameAvailable is called
// on JAVA thread (i.e., UI thread). Both will access |state_| and |observer_|,
// but only VideoCaptureManager would change their value.
-class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
+class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
public:
virtual ~VideoCaptureDeviceAndroid();
@@ -53,14 +54,25 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
kError // Hit error. User needs to recover by destroying the object.
};
+ // Automatically generated enum to interface with Java world.
+ enum AndroidImageFormat {
+#define DEFINE_ANDROID_IMAGEFORMAT(name, value) name = value,
+#include "media/video/capture/android/imageformat_list.h"
+#undef DEFINE_ANDROID_IMAGEFORMAT
+ };
+
explicit VideoCaptureDeviceAndroid(const Name& device_name);
bool Init();
+ VideoPixelFormat GetColorspace();
void SetErrorState(const std::string& reason);
// Prevent racing on accessing |state_| and |observer_| since both could be
// accessed from different threads.
base::Lock lock_;
InternalState state_;
+ bool got_first_frame_;
+ base::TimeTicks expected_next_frame_time_;
+ base::TimeDelta frame_interval_;
VideoCaptureDevice::EventHandler* observer_;
Name device_name_;
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/video/capture/fake_video_capture_device.cc
index 665f728b3c6..8434bc3ebbe 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/video/capture/fake_video_capture_device.cc
@@ -57,7 +57,8 @@ FakeVideoCaptureDevice::FakeVideoCaptureDevice(const Name& device_name)
observer_(NULL),
state_(kIdle),
capture_thread_("CaptureThread"),
- frame_count_(0) {
+ frame_count_(0),
+ capabilities_roster_index_(0) {
}
FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
@@ -78,7 +79,7 @@ void FakeVideoCaptureDevice::Allocate(
}
observer_ = observer;
- capture_format_.color = VideoCaptureCapability::kI420;
+ capture_format_.color = PIXEL_FORMAT_I420;
capture_format_.expected_capture_delay = 0;
capture_format_.interlaced = false;
if (capture_format.width > 320) { // VGA
@@ -91,8 +92,9 @@ void FakeVideoCaptureDevice::Allocate(
capture_format_.frame_rate = 30;
}
- size_t fake_frame_size =
- capture_format_.width * capture_format_.height * 3 / 2;
+ const size_t fake_frame_size = VideoFrame::AllocationSize(
+ VideoFrame::I420,
+ gfx::Size(capture_format_.width, capture_format_.height));
fake_frame_.reset(new uint8[fake_frame_size]);
state_ = kAllocated;
@@ -103,12 +105,13 @@ void FakeVideoCaptureDevice::Reallocate() {
DCHECK_EQ(state_, kCapturing);
capture_format_ = capabilities_roster_.at(++capabilities_roster_index_ %
capabilities_roster_.size());
- DCHECK_EQ(capture_format_.color, VideoCaptureCapability::kI420);
+ DCHECK_EQ(capture_format_.color, PIXEL_FORMAT_I420);
DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution ("
<< capture_format_.width << "x" << capture_format_.height << ")";
- size_t fake_frame_size =
- capture_format_.width * capture_format_.height * 3 / 2;
+ const size_t fake_frame_size = VideoFrame::AllocationSize(
+ VideoFrame::I420,
+ gfx::Size(capture_format_.width, capture_format_.height));
fake_frame_.reset(new uint8[fake_frame_size]);
observer_->OnFrameInfoChanged(capture_format_);
@@ -151,7 +154,9 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
return;
}
- int frame_size = capture_format_.width * capture_format_.height * 3 / 2;
+ const size_t frame_size = VideoFrame::AllocationSize(
+ VideoFrame::I420,
+ gfx::Size(capture_format_.width, capture_format_.height));
memset(fake_frame_.get(), 0, frame_size);
SkBitmap bitmap;
@@ -225,7 +230,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(320,
240,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
@@ -233,7 +238,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
@@ -241,7 +246,7 @@ void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
media::VideoCaptureCapability(800,
600,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
VariableResolutionVideoCaptureDevice));
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/video/capture/fake_video_capture_device.h
index 4804c2885a9..e8ab25567f7 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/video/capture/fake_video_capture_device.h
@@ -16,7 +16,7 @@
namespace media {
-class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
+class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice1 {
public:
static VideoCaptureDevice* Create(const Name& device_name);
virtual ~FakeVideoCaptureDevice();
@@ -28,7 +28,7 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
// VideoCaptureDevice implementation.
virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
+ VideoCaptureDevice::EventHandler* observer) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
virtual void DeAllocate() OVERRIDE;
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/video/capture/linux/video_capture_device_linux.cc
index dd431a8b54e..fdd52772cb1 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.cc
@@ -45,27 +45,32 @@ static const int32 kV4l2RawFmts[] = {
V4L2_PIX_FMT_YUYV
};
-// Linux USB camera devices have names like "UVC Camera (1234:fdcb)"
-static const char kUsbSuffixStart[] = " (";
-static const size_t kUsbModelSize = 9;
-static const char kUsbSuffixEnd[] = ")";
+// USB VID and PID are both 4 bytes long
+static const size_t kVidPidSize = 4;
-static VideoCaptureCapability::Format V4l2ColorToVideoCaptureColorFormat(
+// /sys/class/video4linux/video{N}/device is a symlink to the corresponding
+// USB device info directory.
+static const char kVidPathTemplate[] =
+ "/sys/class/video4linux/%s/device/../idVendor";
+static const char kPidPathTemplate[] =
+ "/sys/class/video4linux/%s/device/../idProduct";
+
+static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
int32 v4l2_fourcc) {
- VideoCaptureCapability::Format result = VideoCaptureCapability::kColorUnknown;
+ VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
switch (v4l2_fourcc) {
case V4L2_PIX_FMT_YUV420:
- result = VideoCaptureCapability::kI420;
+ result = PIXEL_FORMAT_I420;
break;
case V4L2_PIX_FMT_YUYV:
- result = VideoCaptureCapability::kYUY2;
+ result = PIXEL_FORMAT_YUY2;
break;
case V4L2_PIX_FMT_MJPEG:
case V4L2_PIX_FMT_JPEG:
- result = VideoCaptureCapability::kMJPEG;
+ result = PIXEL_FORMAT_MJPEG;
break;
}
- DCHECK_NE(result, VideoCaptureCapability::kColorUnknown);
+ DCHECK_NE(result, PIXEL_FORMAT_UNKNOWN);
return result;
}
@@ -136,24 +141,39 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+static bool ReadIdFile(const std::string path, std::string* id) {
+ char id_buf[kVidPidSize];
+ FILE* file = fopen(path.c_str(), "rb");
+ if (!file)
+ return false;
+ const bool success = fread(id_buf, kVidPidSize, 1, file) == 1;
+ fclose(file);
+ if (!success)
+ return false;
+ id->append(id_buf, kVidPidSize);
+ return true;
+}
+
const std::string VideoCaptureDevice::Name::GetModel() const {
- const size_t usb_suffix_start_size = sizeof(kUsbSuffixStart) - 1;
- const size_t usb_suffix_end_size = sizeof(kUsbSuffixEnd) - 1;
- const size_t suffix_size =
- usb_suffix_start_size + kUsbModelSize + usb_suffix_end_size;
- if (device_name_.length() < suffix_size)
+ // |unique_id| is of the form "/dev/video2". |file_name| is "video2".
+ const std::string dev_dir = "/dev/";
+ DCHECK_EQ(0, unique_id_.compare(0, dev_dir.length(), dev_dir));
+ const std::string file_name =
+ unique_id_.substr(dev_dir.length(), unique_id_.length());
+
+ const std::string vidPath =
+ base::StringPrintf(kVidPathTemplate, file_name.c_str());
+ const std::string pidPath =
+ base::StringPrintf(kPidPathTemplate, file_name.c_str());
+
+ std::string usb_id;
+ if (!ReadIdFile(vidPath, &usb_id))
return "";
- const std::string suffix = device_name_.substr(
- device_name_.length() - suffix_size, suffix_size);
-
- int start_compare =
- suffix.compare(0, usb_suffix_start_size, kUsbSuffixStart);
- int end_compare = suffix.compare(suffix_size - usb_suffix_end_size,
- usb_suffix_end_size, kUsbSuffixEnd);
- if (start_compare != 0 || end_compare != 0)
+ usb_id.append(":");
+ if (!ReadIdFile(pidPath, &usb_id))
return "";
- return suffix.substr(usb_suffix_start_size, kUsbModelSize);
+ return usb_id;
}
VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/video/capture/linux/video_capture_device_linux.h
index dc35fd452ca..aab61aed77b 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.h
@@ -18,7 +18,7 @@
namespace media {
-class VideoCaptureDeviceLinux : public VideoCaptureDevice {
+class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
public:
explicit VideoCaptureDeviceLinux(const Name& device_name);
virtual ~VideoCaptureDeviceLinux();
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/video/capture/mac/video_capture_device_mac.h
index 6ca24f32468..e600459e2c9 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.h
@@ -10,6 +10,9 @@
#include <string>
#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop_proxy.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
@@ -19,7 +22,7 @@ namespace media {
// Called by VideoCaptureManager to open, close and start, stop video capture
// devices.
-class VideoCaptureDeviceMac : public VideoCaptureDevice {
+class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
public:
explicit VideoCaptureDeviceMac(const Name& device_name);
virtual ~VideoCaptureDeviceMac();
@@ -35,11 +38,17 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
bool Init();
// Called to deliver captured video frames.
- void ReceiveFrame(const uint8* video_frame, int video_frame_length,
- const VideoCaptureCapability& frame_info);
+ void ReceiveFrame(const uint8* video_frame,
+ int video_frame_length,
+ const VideoCaptureCapability& frame_info,
+ int aspect_numerator,
+ int aspect_denominator);
+
+ void ReceiveError(const std::string& reason);
private:
void SetErrorState(const std::string& reason);
+ bool UpdateCaptureResolution();
// Flag indicating the internal state.
enum InternalState {
@@ -52,8 +61,19 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
Name device_name_;
VideoCaptureDevice::EventHandler* observer_;
+
+ VideoCaptureCapability current_settings_;
+ bool sent_frame_info_;
+
+ // Only read and write state_ from inside this loop.
+ const scoped_refptr<base::MessageLoopProxy> loop_proxy_;
InternalState state_;
+ // Used with Bind and PostTask to ensure that methods aren't called
+ // after the VideoCaptureDeviceMac is destroyed.
+ base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
+ base::WeakPtr<VideoCaptureDeviceMac> weak_this_;
+
VideoCaptureDeviceQTKit* capture_device_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceMac);
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/video/capture/mac/video_capture_device_mac.mm
index 18912170a10..eea861481fe 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.mm
@@ -6,6 +6,8 @@
#import <QTKit/QTKit.h>
+#include "base/bind.h"
+#include "base/location.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "media/video/capture/mac/video_capture_device_qtkit_mac.h"
@@ -23,10 +25,14 @@ struct Resolution {
int height;
};
-const Resolution kWellSupportedResolutions[] = {
- { 320, 240 },
- { 640, 480 },
- { 1280, 720 },
+const Resolution kQVGA = { 320, 240 },
+ kVGA = { 640, 480 },
+ kHD = { 1280, 720 };
+
+const Resolution* const kWellSupportedResolutions[] = {
+ &kQVGA,
+ &kVGA,
+ &kHD,
};
// TODO(ronghuawu): Replace this with CapabilityList::GetBestMatchedCapability.
@@ -36,13 +42,13 @@ void GetBestMatchSupportedResolution(int* width, int* height) {
int matched_height = *height;
int desired_res_area = *width * *height;
for (size_t i = 0; i < arraysize(kWellSupportedResolutions); ++i) {
- int area = kWellSupportedResolutions[i].width *
- kWellSupportedResolutions[i].height;
+ int area = kWellSupportedResolutions[i]->width *
+ kWellSupportedResolutions[i]->height;
int diff = std::abs(desired_res_area - area);
if (diff < min_diff) {
min_diff = diff;
- matched_width = kWellSupportedResolutions[i].width;
- matched_height = kWellSupportedResolutions[i].height;
+ matched_width = kWellSupportedResolutions[i]->width;
+ matched_height = kWellSupportedResolutions[i]->height;
}
}
*width = matched_width;
@@ -94,17 +100,23 @@ VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
: device_name_(device_name),
observer_(NULL),
+ sent_frame_info_(false),
+ loop_proxy_(base::MessageLoopProxy::current()),
state_(kNotInitialized),
+ weak_factory_(this),
+ weak_this_(weak_factory_.GetWeakPtr()),
capture_device_(nil) {
}
VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
[capture_device_ release];
}
void VideoCaptureDeviceMac::Allocate(
const VideoCaptureCapability& capture_format,
EventHandler* observer) {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
if (state_ != kIdle) {
return;
}
@@ -133,41 +145,57 @@ void VideoCaptureDeviceMac::Allocate(
else if (frame_rate > kMaxFrameRate)
frame_rate = kMaxFrameRate;
- if (![capture_device_ setCaptureHeight:height
- width:width
- frameRate:frame_rate]) {
- SetErrorState("Could not configure capture device.");
+ current_settings_.color = PIXEL_FORMAT_UYVY;
+ current_settings_.width = width;
+ current_settings_.height = height;
+ current_settings_.frame_rate = frame_rate;
+ current_settings_.expected_capture_delay = 0;
+ current_settings_.interlaced = false;
+
+ if (width != kHD.width || height != kHD.height) {
+ // If the resolution is VGA or QVGA, set the capture resolution to the
+ // target size. For most cameras (though not all), at these resolutions
+ // QTKit produces frames with square pixels.
+ if (!UpdateCaptureResolution())
+ return;
+
+ sent_frame_info_ = true;
+ observer_->OnFrameInfo(current_settings_);
+ }
+
+ // If the resolution is HD, start capturing without setting a resolution.
+ // QTKit will produce frames at the native resolution, allowing us to
+ // identify cameras whose native resolution is too low for HD. This
+ // additional information comes at a cost in startup latency, because the
+ // webcam will need to be reopened if its default resolution is not HD or VGA.
+
+ if (![capture_device_ startCapture]) {
+ SetErrorState("Could not start capture device.");
return;
}
state_ = kAllocated;
- VideoCaptureCapability current_settings;
- current_settings.color = VideoCaptureCapability::kARGB;
- current_settings.width = width;
- current_settings.height = height;
- current_settings.frame_rate = frame_rate;
- current_settings.expected_capture_delay = 0;
- current_settings.interlaced = false;
-
- observer_->OnFrameInfo(current_settings);
}
void VideoCaptureDeviceMac::Start() {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK_EQ(state_, kAllocated);
- if (![capture_device_ startCapture]) {
- SetErrorState("Could not start capture device.");
- return;
- }
state_ = kCapturing;
+
+ // This method no longer has any effect. Capturing is triggered by
+ // the call to Allocate.
+ // TODO(bemasc, ncarter): Remove this method.
}
void VideoCaptureDeviceMac::Stop() {
- DCHECK_EQ(state_, kCapturing);
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+ DCHECK(state_ == kCapturing || state_ == kError) << state_;
[capture_device_ stopCapture];
state_ = kAllocated;
}
void VideoCaptureDeviceMac::DeAllocate() {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
if (state_ != kAllocated && state_ != kCapturing) {
return;
}
@@ -185,6 +213,7 @@ const VideoCaptureDevice::Name& VideoCaptureDeviceMac::device_name() {
}
bool VideoCaptureDeviceMac::Init() {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK_EQ(state_, kNotInitialized);
Names device_names;
@@ -205,15 +234,77 @@ bool VideoCaptureDeviceMac::Init() {
void VideoCaptureDeviceMac::ReceiveFrame(
const uint8* video_frame,
int video_frame_length,
- const VideoCaptureCapability& frame_info) {
+ const VideoCaptureCapability& frame_info,
+ int aspect_numerator,
+ int aspect_denominator) {
+ // This method is safe to call from a device capture thread,
+ // i.e. any thread controlled by QTKit.
+
+ if (!sent_frame_info_) {
+ if (current_settings_.width == kHD.width &&
+ current_settings_.height == kHD.height) {
+ bool changeToVga = false;
+ if (frame_info.width < kHD.width || frame_info.height < kHD.height) {
+ // These are the default capture settings, not yet configured to match
+ // |current_settings_|.
+ DCHECK(frame_info.frame_rate == 0);
+ DVLOG(1) << "Switching to VGA because the default resolution is " <<
+ frame_info.width << "x" << frame_info.height;
+ changeToVga = true;
+ }
+ if (frame_info.width == kHD.width && frame_info.height == kHD.height &&
+ aspect_numerator != aspect_denominator) {
+ DVLOG(1) << "Switching to VGA because HD has nonsquare pixel " <<
+ "aspect ratio " << aspect_numerator << ":" << aspect_denominator;
+ changeToVga = true;
+ }
+
+ if (changeToVga) {
+ current_settings_.width = kVGA.width;
+ current_settings_.height = kVGA.height;
+ }
+ }
+
+ if (current_settings_.width == frame_info.width &&
+ current_settings_.height == frame_info.height) {
+ sent_frame_info_ = true;
+ observer_->OnFrameInfo(current_settings_);
+ } else {
+ UpdateCaptureResolution();
+ // The current frame does not have the right width and height, so it
+ // must not be passed to |observer_|.
+ return;
+ }
+ }
+
+ DCHECK(current_settings_.width == frame_info.width &&
+ current_settings_.height == frame_info.height);
+
observer_->OnIncomingCapturedFrame(
video_frame, video_frame_length, base::Time::Now(), 0, false, false);
}
+void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
+ loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&VideoCaptureDeviceMac::SetErrorState, weak_this_,
+ reason));
+}
+
void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
+ DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DLOG(ERROR) << reason;
state_ = kError;
observer_->OnError();
}
+bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
+ if (![capture_device_ setCaptureHeight:current_settings_.height
+ width:current_settings_.width
+ frameRate:current_settings_.frame_rate]) {
+ ReceiveError("Could not configure capture device.");
+ return false;
+ }
+ return true;
+}
+
} // namespace media
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
index 3c9a3db5f99..d032ef0481f 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
@@ -23,8 +23,6 @@ namespace media {
@private
// Settings.
int frameRate_;
- int frameWidth_;
- int frameHeight_;
NSLock *lock_;
media::VideoCaptureDeviceMac *frameReceiver_;
@@ -60,6 +58,9 @@ namespace media {
// Stops video capturing.
- (void)stopCapture;
+// Handle any QTCaptureSessionRuntimeErrorNotifications.
+- (void)handleNotification:(NSNotification *)errorNotification;
+
@end
#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index fa2d7c3be68..2b7e28e4e70 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -108,6 +108,13 @@
// particular crash.
base::debug::SetCrashKeyValue("VideoCaptureDeviceQTKit", "OpenedDevice");
+ // Set the video pixel format to 2VUY (a.k.a UYVY, packed 4:2:2).
+ NSDictionary *captureDictionary = [NSDictionary
+ dictionaryWithObject:
+ [NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8]
+ forKey:(id)kCVPixelBufferPixelFormatTypeKey];
+ [captureDecompressedOutput setPixelBufferAttributes:captureDictionary];
+
return YES;
} else {
// Remove the previously set capture device.
@@ -121,6 +128,7 @@
}
if ([[captureSession_ outputs] count] > 0) {
// Only one output is set for |captureSession_|.
+ DCHECK_EQ([[captureSession_ outputs] count], 1u);
id output = [[captureSession_ outputs] objectAtIndex:0];
[output setDelegate:nil];
@@ -159,25 +167,29 @@
return NO;
}
- frameWidth_ = width;
- frameHeight_ = height;
frameRate_ = frameRate;
+ QTCaptureDecompressedVideoOutput *output =
+ [[captureSession_ outputs] objectAtIndex:0];
+
+ // The old capture dictionary is used to retrieve the initial pixel
+ // format, which must be maintained.
+ NSDictionary *oldCaptureDictionary = [output pixelBufferAttributes];
+
// Set up desired output properties.
NSDictionary *captureDictionary =
[NSDictionary dictionaryWithObjectsAndKeys:
- [NSNumber numberWithDouble:frameWidth_],
+ [NSNumber numberWithDouble:width],
(id)kCVPixelBufferWidthKey,
- [NSNumber numberWithDouble:frameHeight_],
+ [NSNumber numberWithDouble:height],
(id)kCVPixelBufferHeightKey,
- [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],
+ [oldCaptureDictionary
+ valueForKey:(id)kCVPixelBufferPixelFormatTypeKey],
(id)kCVPixelBufferPixelFormatTypeKey,
nil];
- [[[captureSession_ outputs] objectAtIndex:0]
- setPixelBufferAttributes:captureDictionary];
+ [output setPixelBufferAttributes:captureDictionary];
- [[[captureSession_ outputs] objectAtIndex:0]
- setMinimumVideoFrameInterval:(NSTimeInterval)1/(float)frameRate];
+ [output setMinimumVideoFrameInterval:(NSTimeInterval)1/(float)frameRate];
return YES;
}
@@ -194,6 +206,12 @@
<< [[error localizedDescription] UTF8String];
return NO;
}
+ NSNotificationCenter * notificationCenter =
+ [NSNotificationCenter defaultCenter];
+ [notificationCenter addObserver:self
+ selector:@selector(handleNotification:)
+ name:QTCaptureSessionRuntimeErrorNotification
+ object:captureSession_];
[captureSession_ startRunning];
}
return YES;
@@ -204,6 +222,8 @@
[captureSession_ removeInput:captureDeviceInput_];
[captureSession_ stopRunning];
}
+
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
}
// |captureOutput| is called by the capture device to deliver a new frame.
@@ -223,10 +243,11 @@
== kCVReturnSuccess) {
void *baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
- int frameHeight = CVPixelBufferGetHeight(videoFrame);
- int frameSize = bytesPerRow * frameHeight;
+ size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
+ size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
+ size_t frameSize = bytesPerRow * frameHeight;
- // TODO(shess): bytesPerRow may not correspond to frameWidth_*4,
+ // TODO(shess): bytesPerRow may not correspond to frameWidth_*2,
// but VideoCaptureController::OnIncomingCapturedFrame() requires
// it to do so. Plumbing things through is intrusive, for now
// just deliver an adjusted buffer.
@@ -234,7 +255,8 @@
// VideoCaptureController::OnIncomingCapturedVideoFrame, which supports
// pitches.
UInt8* addressToPass = static_cast<UInt8*>(baseAddress);
- size_t expectedBytesPerRow = frameWidth_ * 4;
+ // UYVY is 2 bytes per pixel.
+ size_t expectedBytesPerRow = frameWidth * 2;
if (bytesPerRow > expectedBytesPerRow) {
// TODO(shess): frameHeight and frameHeight_ are not the same,
// try to do what the surrounding code seems to assume.
@@ -244,7 +266,7 @@
// std::vector is contiguous according to standard.
UInt8* adjustedAddress = &adjustedFrame_[0];
- for (int y = 0; y < frameHeight; ++y) {
+ for (size_t y = 0; y < frameHeight; ++y) {
memcpy(adjustedAddress + y * expectedBytesPerRow,
addressToPass + y * bytesPerRow,
expectedBytesPerRow);
@@ -254,19 +276,43 @@
frameSize = frameHeight * expectedBytesPerRow;
}
media::VideoCaptureCapability captureCapability;
- captureCapability.width = frameWidth_;
- captureCapability.height = frameHeight_;
+ captureCapability.width = frameWidth;
+ captureCapability.height = frameHeight;
captureCapability.frame_rate = frameRate_;
- captureCapability.color = media::VideoCaptureCapability::kARGB;
+ captureCapability.color = media::PIXEL_FORMAT_UYVY;
captureCapability.expected_capture_delay = 0;
captureCapability.interlaced = false;
+ // The aspect ratio dictionary is often missing, in which case we report
+ // a pixel aspect ratio of 0:0.
+ int aspectNumerator = 0, aspectDenominator = 0;
+ CFDictionaryRef aspectRatioDict = (CFDictionaryRef)CVBufferGetAttachment(
+ videoFrame, kCVImageBufferPixelAspectRatioKey, NULL);
+ if (aspectRatioDict) {
+ CFNumberRef aspectNumeratorRef = (CFNumberRef)CFDictionaryGetValue(
+ aspectRatioDict, kCVImageBufferPixelAspectRatioHorizontalSpacingKey);
+ CFNumberRef aspectDenominatorRef = (CFNumberRef)CFDictionaryGetValue(
+ aspectRatioDict, kCVImageBufferPixelAspectRatioVerticalSpacingKey);
+ DCHECK(aspectNumeratorRef && aspectDenominatorRef) <<
+ "Aspect Ratio dictionary missing its entries.";
+ CFNumberGetValue(aspectNumeratorRef, kCFNumberIntType, &aspectNumerator);
+ CFNumberGetValue(
+ aspectDenominatorRef, kCFNumberIntType, &aspectDenominator);
+ }
+
// Deliver the captured video frame.
- frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureCapability);
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureCapability,
+ aspectNumerator, aspectDenominator);
CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
}
[lock_ unlock];
}
+- (void)handleNotification:(NSNotification *)errorNotification {
+ NSError * error = (NSError *)[[errorNotification userInfo]
+ objectForKey:QTCaptureSessionErrorKey];
+ frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
+}
+
@end
diff --git a/chromium/media/video/capture/video_capture.h b/chromium/media/video/capture/video_capture.h
index 47c779064f8..3a4eb0e2d32 100644
--- a/chromium/media/video/capture/video_capture.h
+++ b/chromium/media/video/capture/video_capture.h
@@ -11,37 +11,14 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/base/video_frame.h"
#include "media/video/capture/video_capture_types.h"
namespace media {
+class VideoFrame;
+
class MEDIA_EXPORT VideoCapture {
public:
- // TODO(wjia): consider merging with media::VideoFrame if possible.
- class VideoFrameBuffer : public base::RefCountedThreadSafe<VideoFrameBuffer> {
- public:
- VideoFrameBuffer()
- : width(0),
- height(0),
- stride(0),
- buffer_size(0),
- memory_pointer(NULL) {}
-
- int width;
- int height;
- int stride;
- size_t buffer_size;
- uint8* memory_pointer;
- base::Time timestamp;
-
- private:
- friend class base::RefCountedThreadSafe<VideoFrameBuffer>;
- ~VideoFrameBuffer() {}
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameBuffer);
- };
-
// TODO(wjia): add error codes.
// TODO(wjia): support weak ptr.
// Callbacks provided by client for notification of events.
@@ -64,8 +41,9 @@ class MEDIA_EXPORT VideoCapture {
virtual void OnRemoved(VideoCapture* capture) = 0;
// Notify client that a buffer is available.
- virtual void OnBufferReady(VideoCapture* capture,
- scoped_refptr<VideoFrameBuffer> buffer) = 0;
+ virtual void OnFrameReady(
+ VideoCapture* capture,
+ const scoped_refptr<media::VideoFrame>& frame) = 0;
// Notify client about device info.
virtual void OnDeviceInfoReceived(
@@ -93,9 +71,6 @@ class MEDIA_EXPORT VideoCapture {
// |handler| must remain valid until it has received |OnRemoved()|.
virtual void StopCapture(EventHandler* handler) = 0;
- // Feed buffer to video capture when done with it.
- virtual void FeedBuffer(scoped_refptr<VideoFrameBuffer> buffer) = 0;
-
virtual bool CaptureStarted() = 0;
virtual int CaptureWidth() = 0;
virtual int CaptureHeight() = 0;
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/video/capture/video_capture_device.cc
index 5c1245613dd..4175412138f 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/video/capture/video_capture_device.cc
@@ -3,17 +3,18 @@
// found in the LICENSE file.
#include "media/video/capture/video_capture_device.h"
+#include "base/strings/string_util.h"
namespace media {
const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
-// On Linux, the device name already includes the model identifier.
-#if !defined(OS_LINUX)
- std::string model_id = GetModel();
- if (!model_id.empty())
- return device_name_ + " (" + model_id + ")";
-#endif // if !defined(OS_LINUX)
- return device_name_;
+ const std::string model_id = GetModel();
+ if (model_id.empty())
+ return device_name_;
+ const std::string suffix = " (" + model_id + ")";
+ if (EndsWith(device_name_, suffix, true)) // |true| means case-sensitive.
+ return device_name_;
+ return device_name_ + suffix;
}
VideoCaptureDevice::Name*
@@ -25,4 +26,25 @@ VideoCaptureDevice::Names::FindById(const std::string& id) {
return NULL;
}
+VideoCaptureDevice::~VideoCaptureDevice() {}
+
+VideoCaptureDevice1::VideoCaptureDevice1() {}
+
+VideoCaptureDevice1::~VideoCaptureDevice1() {}
+
+void VideoCaptureDevice1::AllocateAndStart(
+ const VideoCaptureCapability& capture_format,
+ scoped_ptr<EventHandler> client) {
+ client_ = client.Pass();
+ Allocate(capture_format, client_.get());
+ Start();
+}
+
+void VideoCaptureDevice1::StopAndDeAllocate() {
+ Stop();
+ DeAllocate();
+ client_.reset();
+};
+
+
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/video/capture/video_capture_device.h
index 4480116751a..e7340841cee 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/video/capture/video_capture_device.h
@@ -72,7 +72,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
// In the shared build, all methods from the STL container will be exported
// so even though they're not used, they're still depended upon.
bool operator==(const Name& other) const {
- return other.id() == unique_id_ && other.name() == device_name_;
+ return other.id() == unique_id_;
}
bool operator<(const Name& other) const {
return unique_id_ < other.id();
@@ -90,7 +90,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
#if defined(OS_WIN)
// This class wraps the CaptureApiType, so it has a by default value if not
// inititalized, and I (mcasas) do a DCHECK on reading its value.
- class CaptureApiClass{
+ class CaptureApiClass {
public:
CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
CaptureApiClass(const CaptureApiType api_type)
@@ -120,6 +120,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
class MEDIA_EXPORT EventHandler {
public:
+ virtual ~EventHandler() {}
// Reserve an output buffer into which a video frame can be captured
// directly. If all buffers are currently busy, returns NULL.
@@ -133,13 +134,6 @@ class MEDIA_EXPORT VideoCaptureDevice {
// VideoCaptureDevice until either the last reference to the VideoFrame is
// released, or until the buffer is passed back to the EventHandler's
// OnIncomingCapturedFrame() method.
- //
- // Threading note: After VideoCaptureDevice::DeAllocate() occurs, the
- // VideoCaptureDevice is not permitted to make any additional calls through
- // its EventHandler. However, any VideoFrames returned from the EventHandler
- // DO remain valid after DeAllocate(). The VideoCaptureDevice must still
- // eventually release them, but it may do so later -- e.g., after a queued
- // capture operation completes.
virtual scoped_refptr<media::VideoFrame> ReserveOutputBuffer() = 0;
// Captured a new video frame as a raw buffer. The size, color format, and
@@ -177,24 +171,21 @@ class MEDIA_EXPORT VideoCaptureDevice {
base::Time timestamp) = 0;
// An error has occurred that cannot be handled and VideoCaptureDevice must
- // be DeAllocate()-ed.
+ // be StopAndDeAllocate()-ed.
virtual void OnError() = 0;
- // Called when VideoCaptureDevice::Allocate() has been called to inform of
- // the resulting frame size.
+ // Called when VideoCaptureDevice::AllocateAndStart() has been called to
+ // inform of the resulting frame size.
virtual void OnFrameInfo(const VideoCaptureCapability& info) = 0;
// Called when the native resolution of VideoCaptureDevice has been changed
// and it needs to inform its client of the new frame size.
virtual void OnFrameInfoChanged(const VideoCaptureCapability& info) {};
-
- protected:
- virtual ~EventHandler() {}
};
// Creates a VideoCaptureDevice object.
// Return NULL if the hardware is not available.
static VideoCaptureDevice* Create(const Name& device_name);
- virtual ~VideoCaptureDevice() {}
+ virtual ~VideoCaptureDevice();
// Gets the names of all video capture devices connected to this computer.
static void GetDeviceNames(Names* device_names);
@@ -202,10 +193,56 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Prepare the camera for use. After this function has been called no other
// applications can use the camera. On completion EventHandler::OnFrameInfo()
// is called informing of the resulting resolution and frame rate.
+ // StopAndDeAllocate() must be called before the object is deleted.
+ virtual void AllocateAndStart(
+ const VideoCaptureCapability& capture_format,
+ scoped_ptr<EventHandler> client) = 0;
+
+ // Deallocates the camera, possibly asynchronously.
+ //
+ // This call requires the device to do the following things, eventually: put
+ // camera hardware into a state where other applications could use it, free
+ // the memory associated with capture, and delete the |client| pointer passed
+ // into AllocateAndStart.
+ //
+ // If deallocation is done asynchronously, then the device implementation must
+ // ensure that a subsequent AllocateAndStart() operation targeting the same ID
+ // would be sequenced through the same task runner, so that deallocation
+ // happens first.
+ virtual void StopAndDeAllocate() = 0;
+};
+
+// VideoCaptureDevice1 is a bridge to an older API against which
+// VideoCaptureDevices were implemented. Differences between VideoCaptureDevice
+// (new style) and VideoCaptureDevice1 (old style) are as follows:
+//
+// [1] The Stop+DeAllocate calls are merged in the new style.
+// [2] The Allocate+Start calls are merged in the new style.
+// [3] New style devices own their EventHandler* pointers, allowing handlers to
+// remain valid even after the device is stopped. Whereas old style devices
+// may not dereference their handlers after DeAllocate().
+// [4] device_name() is eliminated from the new-style interface.
+//
+// TODO(nick): Remove this bridge class. It exists to enable incremental
+// migration to an alternative VideoCaptureDevice API.
+class MEDIA_EXPORT VideoCaptureDevice1 : public VideoCaptureDevice {
+ public:
+ VideoCaptureDevice1();
+ virtual ~VideoCaptureDevice1();
+
+ // VideoCaptureDevice implementation.
+ virtual void AllocateAndStart(
+ const VideoCaptureCapability& capture_format,
+ scoped_ptr<EventHandler> client) OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
+
+ // Prepare the camera for use. After this function has been called no other
+ // applications can use the camera. On completion EventHandler::OnFrameInfo()
+ // is called informing of the resulting resolution and frame rate.
// DeAllocate() must be called before this function can be called again and
// before the object is deleted.
virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* observer) = 0;
+ EventHandler* client) = 0;
// Start capturing video frames. Allocate must be called before this function.
virtual void Start() = 0;
@@ -221,6 +258,10 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Get the name of the capture device.
virtual const Name& device_name() = 0;
+
+ private:
+ // The device client which proxies device events to the controller.
+ scoped_ptr<EventHandler> client_;
};
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device_dummy.cc b/chromium/media/video/capture/video_capture_device_dummy.cc
deleted file mode 100644
index 02752edc54f..00000000000
--- a/chromium/media/video/capture/video_capture_device_dummy.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/capture/video_capture_device_dummy.h"
-
-namespace media {
-
-VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
- return NULL;
-}
-
-void VideoCaptureDevice::GetDeviceNames(Names* device_names) {}
-
-VideoCaptureDeviceDummy::VideoCaptureDeviceDummy() {}
-
-VideoCaptureDeviceDummy::~VideoCaptureDeviceDummy() {}
-
-void VideoCaptureDeviceDummy::Allocate(
- const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) {
-}
-
-void VideoCaptureDeviceDummy::Start() {}
-
-void VideoCaptureDeviceDummy::Stop() {}
-
-void VideoCaptureDeviceDummy::DeAllocate() {}
-
-} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device_dummy.h b/chromium/media/video/capture/video_capture_device_dummy.h
deleted file mode 100644
index c4a95cb0ce2..00000000000
--- a/chromium/media/video/capture/video_capture_device_dummy.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A dummy implementation of VideoCaptureDevice to use for platforms without
-// real video capture support. The class will be removed once the other
-// platforms have real video capture device support.
-//
-// TODO(mflodman) Remove when video_capture_device_mac and
-// video_capture_device_win are available.
-
-#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DUMMY_H_
-#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DUMMY_H_
-
-#include "base/compiler_specific.h"
-#include "media/video/capture/video_capture_device.h"
-
-namespace media {
-
-class VideoCaptureDeviceDummy : public VideoCaptureDevice {
- public:
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
-
- private:
- VideoCaptureDeviceDummy();
- virtual ~VideoCaptureDeviceDummy();
-
- DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceDummy);
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DUMMY_H_
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/video/capture/video_capture_device_unittest.cc
index e39c59b0541..586060f169f 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/video_capture_device_unittest.cc
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
@@ -65,8 +69,10 @@ class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
MOCK_METHOD1(OnFrameInfo, void(const VideoCaptureCapability&));
MOCK_METHOD1(OnFrameInfoChanged, void(const VideoCaptureCapability&));
- explicit MockFrameObserver(base::WaitableEvent* wait_event)
- : wait_event_(wait_event) {}
+ explicit MockFrameObserver(
+ base::Closure frame_cb)
+ : main_thread_(base::MessageLoopProxy::current()),
+ frame_cb_(frame_cb) {}
virtual void OnError() OVERRIDE {
OnErr();
@@ -79,45 +85,40 @@ class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
int rotation,
bool flip_vert,
bool flip_horiz) OVERRIDE {
- wait_event_->Signal();
+ main_thread_->PostTask(FROM_HERE, frame_cb_);
}
virtual void OnIncomingCapturedVideoFrame(
const scoped_refptr<media::VideoFrame>& frame,
base::Time timestamp) OVERRIDE {
- wait_event_->Signal();
+ main_thread_->PostTask(FROM_HERE, frame_cb_);
}
private:
- base::WaitableEvent* wait_event_;
+ scoped_refptr<base::MessageLoopProxy> main_thread_;
+ base::Closure frame_cb_;
};
class VideoCaptureDeviceTest : public testing::Test {
- public:
- VideoCaptureDeviceTest(): wait_event_(false, false) { }
-
- void PostQuitTask() {
- loop_->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
- loop_->Run();
- }
-
protected:
+ typedef media::VideoCaptureDevice::EventHandler EventHandler;
+
virtual void SetUp() {
- frame_observer_.reset(new MockFrameObserver(&wait_event_));
loop_.reset(new base::MessageLoopForUI());
+ frame_observer_.reset(new MockFrameObserver(loop_->QuitClosure()));
#if defined(OS_ANDROID)
media::VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
base::android::AttachCurrentThread());
#endif
}
- virtual void TearDown() {
+ void WaitForCapturedFrame() {
+ loop_->Run();
}
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
- base::WaitableEvent wait_event_;
scoped_ptr<MockFrameObserver> frame_observer_;
VideoCaptureDevice::Names names_;
scoped_ptr<base::MessageLoop> loop_;
@@ -159,19 +160,17 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
- device->Start();
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
// Get captured video frames.
- PostQuitTask();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ loop_->Run();
EXPECT_EQ(rx_capability.width, 640);
EXPECT_EQ(rx_capability.height, 480);
- device->Stop();
- device->DeAllocate();
+ device->StopAndDeAllocate();
}
TEST_F(VideoCaptureDeviceTest, Capture720p) {
@@ -197,17 +196,15 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
VideoCaptureCapability capture_format(1280,
720,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
- device->Start();
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
// Get captured video frames.
- PostQuitTask();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
- device->Stop();
- device->DeAllocate();
+ WaitForCapturedFrame();
+ device->StopAndDeAllocate();
}
TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
@@ -231,12 +228,13 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
VideoCaptureCapability capture_format(637,
472,
35,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
- device->DeAllocate();
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
+ device->StopAndDeAllocate();
EXPECT_EQ(rx_capability.width, 640);
EXPECT_EQ(rx_capability.height, 480);
}
@@ -247,58 +245,67 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
+
+ // First, do a number of very fast device start/stops.
+ for (int i = 0; i <= 5; i++) {
+ scoped_ptr<MockFrameObserver> frame_observer(
+ new MockFrameObserver(base::Bind(&base::DoNothing)));
+ scoped_ptr<VideoCaptureDevice> device(
+ VideoCaptureDevice::Create(names_.front()));
+ gfx::Size resolution;
+ if (i % 2) {
+ resolution = gfx::Size(640, 480);
+ } else {
+ resolution = gfx::Size(1280, 1024);
+ }
+ VideoCaptureCapability requested_format(
+ resolution.width(),
+ resolution.height(),
+ 30,
+ PIXEL_FORMAT_I420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+
+ // The device (if it is an async implementation) may or may not get as far
+ // as the OnFrameInfo() step; we're intentionally not going to wait for it
+ // to get that far.
+ ON_CALL(*frame_observer, OnFrameInfo(_));
+ device->AllocateAndStart(requested_format,
+ frame_observer.PassAs<EventHandler>());
+ device->StopAndDeAllocate();
+ }
+
+ // Finally, do a device start and wait for it to finish.
+ gfx::Size resolution;
+ VideoCaptureCapability requested_format(
+ 320,
+ 240,
+ 30,
+ PIXEL_FORMAT_I420,
+ 0,
+ false,
+ ConstantResolutionVideoCaptureDevice);
+
+ base::RunLoop run_loop;
+ scoped_ptr<MockFrameObserver> frame_observer(
+ new MockFrameObserver(base::Bind(run_loop.QuitClosure())));
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
- .Times(0);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability_1;
- VideoCaptureCapability rx_capability_2;
- VideoCaptureCapability capture_format_1(640,
- 480,
- 30,
- VideoCaptureCapability::kI420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- VideoCaptureCapability capture_format_2(1280,
- 1024,
- 30,
- VideoCaptureCapability::kI420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- VideoCaptureCapability capture_format_3(320,
- 240,
- 30,
- VideoCaptureCapability::kI420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability_1));
- device->Allocate(capture_format_1, frame_observer_.get());
- device->Start();
- // Nothing shall happen.
- device->Allocate(capture_format_2, frame_observer_.get());
- device->DeAllocate();
- // Allocate new size 320, 240
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability_2));
- device->Allocate(capture_format_3, frame_observer_.get());
-
- device->Start();
- // Get captured video frames.
- PostQuitTask();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
- EXPECT_EQ(rx_capability_1.width, 640);
- EXPECT_EQ(rx_capability_1.height, 480);
- EXPECT_EQ(rx_capability_2.width, 320);
- EXPECT_EQ(rx_capability_2.height, 240);
- device->Stop();
- device->DeAllocate();
+ // The device (if it is an async implementation) may or may not get as far
+ // as the OnFrameInfo() step; we're intentionally not going to wait for it
+ // to get that far.
+ VideoCaptureCapability final_format;
+ EXPECT_CALL(*frame_observer, OnFrameInfo(_))
+ .Times(1).WillOnce(SaveArg<0>(&final_format));
+ device->AllocateAndStart(requested_format,
+ frame_observer.PassAs<EventHandler>());
+ run_loop.Run(); // Waits for a frame.
+ device->StopAndDeAllocate();
+ device.reset();
+ EXPECT_EQ(final_format.width, 320);
+ EXPECT_EQ(final_format.height, 240);
}
TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
@@ -321,20 +328,18 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
-
- device->Start();
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
// Get captured video frames.
- PostQuitTask();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ WaitForCapturedFrame();
EXPECT_EQ(rx_capability.width, 640);
EXPECT_EQ(rx_capability.height, 480);
EXPECT_EQ(rx_capability.frame_rate, 30);
- device->DeAllocate();
+ device->StopAndDeAllocate();
}
TEST_F(VideoCaptureDeviceTest, FakeCapture) {
@@ -359,19 +364,17 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
VideoCaptureCapability capture_format(640,
480,
30,
- VideoCaptureCapability::kI420,
+ PIXEL_FORMAT_I420,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
-
- device->Start();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
+ WaitForCapturedFrame();
EXPECT_EQ(rx_capability.width, 640);
EXPECT_EQ(rx_capability.height, 480);
EXPECT_EQ(rx_capability.frame_rate, 30);
- device->Stop();
- device->DeAllocate();
+ device->StopAndDeAllocate();
}
// Start the camera in 720p to capture MJPEG instead of a raw format.
@@ -396,18 +399,16 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
VideoCaptureCapability capture_format(1280,
720,
30,
- VideoCaptureCapability::kMJPEG,
+ PIXEL_FORMAT_MJPEG,
0,
false,
ConstantResolutionVideoCaptureDevice);
- device->Allocate(capture_format, frame_observer_.get());
-
- device->Start();
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
// Get captured video frames.
- PostQuitTask();
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_max_timeout()));
- EXPECT_EQ(rx_capability.color, VideoCaptureCapability::kMJPEG);
- device->DeAllocate();
+ WaitForCapturedFrame();
+ EXPECT_EQ(rx_capability.color, PIXEL_FORMAT_MJPEG);
+ device->StopAndDeAllocate();
}
TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
@@ -432,22 +433,21 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
EXPECT_CALL(*frame_observer_, OnErr())
.Times(0);
+ int action_count = 200;
+ EXPECT_CALL(*frame_observer_, OnFrameInfoChanged(_))
+ .Times(AtLeast(action_count / 30));
- device->Allocate(capture_format, frame_observer_.get());
+ device->AllocateAndStart(capture_format,
+ frame_observer_.PassAs<EventHandler>());
// The amount of times the OnFrameInfoChanged gets called depends on how often
// FakeDevice is supposed to change and what is its actual frame rate.
// We set TimeWait to 200 action timeouts and this should be enough for at
// least action_count/kFakeCaptureCapabilityChangePeriod calls.
- int action_count = 200;
- EXPECT_CALL(*frame_observer_, OnFrameInfoChanged(_))
- .Times(AtLeast(action_count / 30));
- device->Start();
for (int i = 0; i < action_count; ++i) {
- EXPECT_TRUE(wait_event_.TimedWait(TestTimeouts::action_timeout()));
+ WaitForCapturedFrame();
}
- device->Stop();
- device->DeAllocate();
+ device->StopAndDeAllocate();
}
}; // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.cc b/chromium/media/video/capture/video_capture_proxy.cc
index eb8fd071909..3adbb7ce3b2 100644
--- a/chromium/media/video/capture/video_capture_proxy.cc
+++ b/chromium/media/video/capture/video_capture_proxy.cc
@@ -77,15 +77,16 @@ void VideoCaptureHandlerProxy::OnRemoved(VideoCapture* capture) {
GetState(capture)));
}
-void VideoCaptureHandlerProxy::OnBufferReady(
+void VideoCaptureHandlerProxy::OnFrameReady(
VideoCapture* capture,
- scoped_refptr<VideoCapture::VideoFrameBuffer> buffer) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnBufferReadyOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture),
- buffer));
+ const scoped_refptr<VideoFrame>& frame) {
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureHandlerProxy::OnFrameReadyOnMainThread,
+ base::Unretained(this),
+ capture,
+ GetState(capture),
+ frame));
}
void VideoCaptureHandlerProxy::OnDeviceInfoReceived(
@@ -135,12 +136,12 @@ void VideoCaptureHandlerProxy::OnRemovedOnMainThread(
proxied_->OnRemoved(capture);
}
-void VideoCaptureHandlerProxy::OnBufferReadyOnMainThread(
+void VideoCaptureHandlerProxy::OnFrameReadyOnMainThread(
VideoCapture* capture,
const VideoCaptureState& state,
- scoped_refptr<VideoCapture::VideoFrameBuffer> buffer) {
+ const scoped_refptr<VideoFrame>& frame) {
state_ = state;
- proxied_->OnBufferReady(capture, buffer);
+ proxied_->OnFrameReady(capture, frame);
}
void VideoCaptureHandlerProxy::OnDeviceInfoReceivedOnMainThread(
diff --git a/chromium/media/video/capture/video_capture_proxy.h b/chromium/media/video/capture/video_capture_proxy.h
index 78c459d1849..fbb75776abe 100644
--- a/chromium/media/video/capture/video_capture_proxy.h
+++ b/chromium/media/video/capture/video_capture_proxy.h
@@ -50,9 +50,8 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
virtual void OnPaused(VideoCapture* capture) OVERRIDE;
virtual void OnError(VideoCapture* capture, int error_code) OVERRIDE;
virtual void OnRemoved(VideoCapture* capture) OVERRIDE;
- virtual void OnBufferReady(
- VideoCapture* capture,
- scoped_refptr<VideoCapture::VideoFrameBuffer> buffer) OVERRIDE;
+ virtual void OnFrameReady(VideoCapture* capture,
+ const scoped_refptr<VideoFrame>& frame) OVERRIDE;
virtual void OnDeviceInfoReceived(
VideoCapture* capture,
const VideoCaptureParams& device_info) OVERRIDE;
@@ -75,14 +74,12 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
void OnRemovedOnMainThread(
VideoCapture* capture,
const VideoCaptureState& state);
- void OnBufferReadyOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- scoped_refptr<VideoCapture::VideoFrameBuffer> buffer);
- void OnDeviceInfoReceivedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- const VideoCaptureParams& device_info);
+ void OnFrameReadyOnMainThread(VideoCapture* capture,
+ const VideoCaptureState& state,
+ const scoped_refptr<VideoFrame>& frame);
+ void OnDeviceInfoReceivedOnMainThread(VideoCapture* capture,
+ const VideoCaptureState& state,
+ const VideoCaptureParams& device_info);
// Only accessed from main thread.
VideoCapture::EventHandler* proxied_;
diff --git a/chromium/media/video/capture/video_capture_types.cc b/chromium/media/video/capture/video_capture_types.cc
new file mode 100644
index 00000000000..5b8e2265360
--- /dev/null
+++ b/chromium/media/video/capture/video_capture_types.cc
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/video_capture_types.h"
+
+#include "media/base/limits.h"
+
+namespace media {
+
+VideoCaptureFormat::VideoCaptureFormat()
+ : width(0),
+ height(0),
+ frame_rate(0),
+ frame_size_type(ConstantResolutionVideoCaptureDevice) {}
+
+VideoCaptureFormat::VideoCaptureFormat(
+ int width,
+ int height,
+ int frame_rate,
+ VideoCaptureResolutionType frame_size_type)
+ : width(width),
+ height(height),
+ frame_rate(frame_rate),
+ frame_size_type(frame_size_type) {}
+
+bool VideoCaptureFormat::IsValid() const {
+ return (width > 0) && (height > 0) && (frame_rate > 0) &&
+ (frame_rate < media::limits::kMaxFramesPerSecond) &&
+ (width < media::limits::kMaxDimension) &&
+ (height < media::limits::kMaxDimension) &&
+ (width * height < media::limits::kMaxCanvas) &&
+ (frame_size_type >= 0) &&
+ (frame_size_type < media::MaxVideoCaptureResolutionType);
+}
+
+VideoCaptureParams::VideoCaptureParams()
+ : session_id(0) {}
+
+VideoCaptureCapability::VideoCaptureCapability()
+ : color(PIXEL_FORMAT_UNKNOWN),
+ expected_capture_delay(0),
+ interlaced(false),
+ session_id(0) {}
+
+VideoCaptureCapability::VideoCaptureCapability(
+ int width,
+ int height,
+ int frame_rate,
+ VideoPixelFormat color,
+ int delay,
+ bool interlaced,
+ VideoCaptureResolutionType frame_size_type)
+ : VideoCaptureFormat(width, height, frame_rate, frame_size_type),
+ color(color),
+ expected_capture_delay(delay),
+ interlaced(interlaced),
+ session_id(0) {}
+
+} // namespace media
diff --git a/chromium/media/video/capture/video_capture_types.h b/chromium/media/video/capture/video_capture_types.h
index 57712727ef0..1a170aaf5e7 100644
--- a/chromium/media/video/capture/video_capture_types.h
+++ b/chromium/media/video/capture/video_capture_types.h
@@ -19,68 +19,61 @@ enum VideoCaptureResolutionType {
MaxVideoCaptureResolutionType, // Must be last.
};
-// Parameters for starting video capture and device information.
-struct VideoCaptureParams {
- VideoCaptureParams()
- : width(0),
- height(0),
- frame_per_second(0),
- session_id(0),
- frame_size_type(ConstantResolutionVideoCaptureDevice) {};
+// Color formats from camera.
+enum VideoPixelFormat {
+ PIXEL_FORMAT_UNKNOWN, // Color format not set.
+ PIXEL_FORMAT_I420,
+ PIXEL_FORMAT_YUY2,
+ PIXEL_FORMAT_UYVY,
+ PIXEL_FORMAT_RGB24,
+ PIXEL_FORMAT_ARGB,
+ PIXEL_FORMAT_MJPEG,
+ PIXEL_FORMAT_NV21,
+ PIXEL_FORMAT_YV12,
+};
+
+// Video capture format specification.
+class MEDIA_EXPORT VideoCaptureFormat {
+ public:
+ VideoCaptureFormat();
+ VideoCaptureFormat(int width,
+ int height,
+ int frame_rate,
+ VideoCaptureResolutionType frame_size_type);
+
+ // Checks that all values are in the expected range. All limits are specified
+ // in media::Limits.
+ bool IsValid() const;
+
int width;
int height;
- int frame_per_second;
- VideoCaptureSessionId session_id;
+ int frame_rate;
VideoCaptureResolutionType frame_size_type;
};
-// Capabilities describe the format a camera capture video in.
-struct VideoCaptureCapability {
- // Color formats from camera.
- enum Format {
- kColorUnknown, // Color format not set.
- kI420,
- kYUY2,
- kUYVY,
- kRGB24,
- kARGB,
- kMJPEG,
- kNV21,
- kYV12,
- };
+// Parameters for starting video capture and device information.
+class MEDIA_EXPORT VideoCaptureParams : public VideoCaptureFormat {
+ public:
+ VideoCaptureParams();
+
+ VideoCaptureSessionId session_id;
+};
- VideoCaptureCapability()
- : width(0),
- height(0),
- frame_rate(0),
- color(kColorUnknown),
- expected_capture_delay(0),
- interlaced(false),
- frame_size_type(ConstantResolutionVideoCaptureDevice),
- session_id(0) {};
+// Capabilities describe the format a camera capture video in.
+class MEDIA_EXPORT VideoCaptureCapability : public VideoCaptureFormat {
+ public:
+ VideoCaptureCapability();
VideoCaptureCapability(int width,
int height,
int frame_rate,
- Format color,
+ VideoPixelFormat color,
int delay,
bool interlaced,
- VideoCaptureResolutionType frame_size_type)
- : width(width),
- height(height),
- frame_rate(frame_rate),
- color(color),
- expected_capture_delay(delay),
- interlaced(interlaced),
- frame_size_type(frame_size_type),
- session_id(0) {};
+ VideoCaptureResolutionType frame_size_type);
- int width; // Desired width.
- int height; // Desired height.
- int frame_rate; // Desired frame rate.
- Format color; // Desired video type.
+ VideoPixelFormat color; // Desired video type.
int expected_capture_delay; // Expected delay in millisecond.
bool interlaced; // Need interlace format.
- VideoCaptureResolutionType frame_size_type;
VideoCaptureSessionId session_id;
};
diff --git a/chromium/media/video/capture/win/filter_base_win.cc b/chromium/media/video/capture/win/filter_base_win.cc
index 89309df694d..ddc68d68b2c 100644
--- a/chromium/media/video/capture/win/filter_base_win.cc
+++ b/chromium/media/video/capture/win/filter_base_win.cc
@@ -72,8 +72,6 @@ class PinEnumerator
STDMETHOD(Clone)(IEnumPins** clone) {
PinEnumerator* pin_enum = new PinEnumerator(filter_);
- if (!pin_enum)
- return E_OUTOFMEMORY;
pin_enum->AddRef();
pin_enum->index_ = index_;
*clone = pin_enum;
diff --git a/chromium/media/video/capture/win/pin_base_win.cc b/chromium/media/video/capture/win/pin_base_win.cc
index 7e2f7b095ad..3a5139ea39d 100644
--- a/chromium/media/video/capture/win/pin_base_win.cc
+++ b/chromium/media/video/capture/win/pin_base_win.cc
@@ -93,8 +93,6 @@ class TypeEnumerator
STDMETHOD(Clone)(IEnumMediaTypes** clone) {
TypeEnumerator* type_enum = new TypeEnumerator(pin_);
- if (!type_enum)
- return E_OUTOFMEMORY;
type_enum->AddRef();
type_enum->index_ = index_;
*clone = type_enum;
@@ -248,8 +246,18 @@ STDMETHODIMP PinBase::GetAllocatorRequirements(
STDMETHODIMP PinBase::ReceiveMultiple(IMediaSample** samples,
long sample_count,
long* processed) {
- NOTREACHED();
- return VFW_E_INVALIDMEDIATYPE;
+ DCHECK(samples);
+
+ HRESULT hr = S_OK;
+ *processed = 0;
+ while (sample_count--) {
+ hr = Receive(samples[*processed]);
+ // S_FALSE means don't send any more.
+ if (hr != S_OK)
+ break;
+ ++(*processed);
+ }
+ return hr;
}
STDMETHODIMP PinBase::ReceiveCanBlock() {
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.cc b/chromium/media/video/capture/win/sink_input_pin_win.cc
index 7e55531b89f..1de1ea1671a 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.cc
+++ b/chromium/media/video/capture/win/sink_input_pin_win.cc
@@ -114,17 +114,17 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
}
if (sub_type == kMediaSubTypeI420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
- resulting_capability_.color = VideoCaptureCapability::kI420;
+ resulting_capability_.color = PIXEL_FORMAT_I420;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
- resulting_capability_.color = VideoCaptureCapability::kYUY2;
+ resulting_capability_.color = PIXEL_FORMAT_YUY2;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
- resulting_capability_.color = VideoCaptureCapability::kRGB24;
+ resulting_capability_.color = PIXEL_FORMAT_RGB24;
return true; // This format is acceptable.
}
return false;
@@ -146,7 +146,7 @@ void SinkInputPin::SetRequestedMediaCapability(
resulting_capability_.width = 0;
resulting_capability_.height = 0;
resulting_capability_.frame_rate = 0;
- resulting_capability_.color = VideoCaptureCapability::kColorUnknown;
+ resulting_capability_.color = PIXEL_FORMAT_UNKNOWN;
resulting_capability_.expected_capture_delay = 0;
resulting_capability_.interlaced = false;
}
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
index dea97b7c264..874408fb2cd 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
@@ -70,18 +70,18 @@ bool CreateVideoCaptureDevice(const char* sym_link, IMFMediaSource** source) {
return SUCCEEDED(MFCreateDeviceSource(attributes, source));
}
-bool FormatFromGuid(const GUID& guid, VideoCaptureCapability::Format* format) {
+bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format) {
struct {
const GUID& guid;
- const VideoCaptureCapability::Format format;
+ const VideoPixelFormat format;
} static const kFormatMap[] = {
- { MFVideoFormat_I420, VideoCaptureCapability::kI420 },
- { MFVideoFormat_YUY2, VideoCaptureCapability::kYUY2 },
- { MFVideoFormat_UYVY, VideoCaptureCapability::kUYVY },
- { MFVideoFormat_RGB24, VideoCaptureCapability::kRGB24 },
- { MFVideoFormat_ARGB32, VideoCaptureCapability::kARGB },
- { MFVideoFormat_MJPG, VideoCaptureCapability::kMJPEG },
- { MFVideoFormat_YV12, VideoCaptureCapability::kYV12 },
+ { MFVideoFormat_I420, PIXEL_FORMAT_I420 },
+ { MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2 },
+ { MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY },
+ { MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24 },
+ { MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB },
+ { MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG },
+ { MFVideoFormat_YV12, PIXEL_FORMAT_YV12 },
};
for (int i = 0; i < arraysize(kFormatMap); ++i) {
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/video/capture/win/video_capture_device_mf_win.h
index b4ef6fcb098..2daa03535dd 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.h
@@ -28,7 +28,7 @@ class MFReaderCallback;
class MEDIA_EXPORT VideoCaptureDeviceMFWin
: public base::NonThreadSafe,
- public VideoCaptureDevice {
+ public VideoCaptureDevice1 {
public:
explicit VideoCaptureDeviceMFWin(const Name& device_name);
virtual ~VideoCaptureDeviceMFWin();
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/video/capture/win/video_capture_device_win.cc
index 1a7c3b7b968..307ab2967bf 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_win.cc
@@ -10,6 +10,7 @@
#include "base/command_line.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
+#include "base/win/metro.h"
#include "base/win/scoped_variant.h"
#include "media/base/media_switches.h"
#include "media/video/capture/win/video_capture_device_mf_win.h"
@@ -150,22 +151,15 @@ void DeleteMediaType(AM_MEDIA_TYPE* mt) {
// static
void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
- Names::iterator it;
-
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (VideoCaptureDeviceMFWin::PlatformSupported() &&
+ // Use Media Foundation for Metro processes (after and including Win8)
+ // and DirectShow for any other platforms.
+ if (base::win::IsMetroProcess() &&
!cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) {
VideoCaptureDeviceMFWin::GetDeviceNames(device_names);
+ } else {
+ VideoCaptureDeviceWin::GetDeviceNames(device_names);
}
- // Retrieve the devices with DirectShow (DS) interface. They might (partially)
- // overlap with the MediaFoundation (MF), so the list has to be consolidated.
- Names temp_names;
- VideoCaptureDeviceWin::GetDeviceNames(&temp_names);
-
- // Merge the DS devices into the MF device list, and next remove
- // the duplicates, giving priority to the MF "versions".
- device_names->merge(temp_names);
- device_names->unique();
}
// static
@@ -388,7 +382,7 @@ void VideoCaptureDeviceWin::Allocate(
if (FAILED(hr))
SetErrorState("Failed to set capture device output format");
- if (capability.color == VideoCaptureCapability::kMJPEG &&
+ if (capability.color == PIXEL_FORMAT_MJPEG &&
!mjpg_filter_.get()) {
// Create MJPG filter if we need it.
hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
@@ -407,7 +401,7 @@ void VideoCaptureDeviceWin::Allocate(
}
}
- if (capability.color == VideoCaptureCapability::kMJPEG &&
+ if (capability.color == PIXEL_FORMAT_MJPEG &&
mjpg_filter_.get()) {
// Connect the camera to the MJPEG decoder.
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
@@ -587,20 +581,20 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
// We can't switch MEDIATYPE :~(.
if (media_type->subtype == kMediaSubTypeI420) {
- capability.color = VideoCaptureCapability::kI420;
+ capability.color = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_IYUV) {
- // This is identical to kI420.
- capability.color = VideoCaptureCapability::kI420;
+ // This is identical to PIXEL_FORMAT_I420.
+ capability.color = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_RGB24) {
- capability.color = VideoCaptureCapability::kRGB24;
+ capability.color = PIXEL_FORMAT_RGB24;
} else if (media_type->subtype == MEDIASUBTYPE_YUY2) {
- capability.color = VideoCaptureCapability::kYUY2;
+ capability.color = PIXEL_FORMAT_YUY2;
} else if (media_type->subtype == MEDIASUBTYPE_MJPG) {
- capability.color = VideoCaptureCapability::kMJPEG;
+ capability.color = PIXEL_FORMAT_MJPEG;
} else if (media_type->subtype == MEDIASUBTYPE_UYVY) {
- capability.color = VideoCaptureCapability::kUYVY;
+ capability.color = PIXEL_FORMAT_UYVY;
} else if (media_type->subtype == MEDIASUBTYPE_ARGB32) {
- capability.color = VideoCaptureCapability::kARGB;
+ capability.color = PIXEL_FORMAT_ARGB;
} else {
WCHAR guid_str[128];
StringFromGUID2(media_type->subtype, guid_str, arraysize(guid_str));
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/video/capture/win/video_capture_device_win.h
index f6c1a9b0420..4c83d6b3062 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_win.h
@@ -30,7 +30,7 @@ namespace media {
// All the methods in the class can only be run on a COM initialized thread.
class VideoCaptureDeviceWin
: public base::NonThreadSafe,
- public VideoCaptureDevice,
+ public VideoCaptureDevice1,
public SinkFilterObserver {
public:
explicit VideoCaptureDeviceWin(const Name& device_name);
diff --git a/chromium/media/filters/gpu_video_decoder_factories.cc b/chromium/media/video/video_encode_accelerator.cc
index 67d24ce3773..6309180bceb 100644
--- a/chromium/media/filters/gpu_video_decoder_factories.cc
+++ b/chromium/media/video/video_encode_accelerator.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/video/video_encode_accelerator.h"
namespace media {
-GpuVideoDecoderFactories::~GpuVideoDecoderFactories() {}
+VideoEncodeAccelerator::~VideoEncodeAccelerator() {}
} // namespace media
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
new file mode 100644
index 00000000000..8d4f56536bf
--- /dev/null
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/media_export.h"
+#include "media/base/video_decoder_config.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+class BitstreamBuffer;
+class VideoFrame;
+
+// Video encoder interface.
+class MEDIA_EXPORT VideoEncodeAccelerator {
+ public:
+ virtual ~VideoEncodeAccelerator();
+
+ // Specification of an encoding profile supported by an encoder.
+ struct SupportedProfile {
+ VideoCodecProfile profile;
+ gfx::Size max_resolution;
+ struct {
+ uint32 numerator;
+ uint32 denominator;
+ } max_framerate;
+ };
+
+ // Enumeration of potential errors generated by the API.
+ enum Error {
+ // An operation was attempted during an incompatible encoder state.
+ kIllegalStateError,
+ // Invalid argument was passed to an API method.
+ kInvalidArgumentError,
+ // A failure occurred at the GPU process or one of its dependencies.
+ // Examples of such failures include GPU hardware failures, GPU driver
+ // failures, GPU library failures, GPU process programming errors, and so
+ // on.
+ kPlatformFailureError,
+ };
+
+ // Interface for clients that use VideoEncodeAccelerator.
+ class MEDIA_EXPORT Client {
+ public:
+ // Callback to notify client that encoder has been successfully initialized.
+ virtual void NotifyInitializeDone() = 0;
+
+ // Callback to tell the client what size of frames and buffers to provide
+ // for input and output. The VEA disclaims use or ownership of all
+ // previously provided buffers once this callback is made.
+ // Parameters:
+ // |input_count| is the number of input VideoFrames required for encoding.
+ // The client should be prepared to feed at least this many frames into the
+ // encoder before being returned any input frames, since the encoder may
+ // need to hold onto some subset of inputs as reference pictures.
+ // |input_coded_size| is the logical size of the input frames (as reported
+ // by VideoFrame::coded_size()) to encode, in pixels. The encoder may have
+ // hardware alignment requirements that make this different from
+ // |input_visible_size|, as requested in Initialize(), in which case the
+ // input VideoFrame to Encode() should be padded appropriately.
+ // |output_buffer_size| is the required size of output buffers for this
+ // encoder in bytes.
+ virtual void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) = 0;
+
+ // Callback to deliver encoded bitstream buffers. Ownership of the buffer
+ // is transferred back to the VEA::Client once this callback is made.
+ // Parameters:
+ // |bitstream_buffer_id| is the id of the buffer that is ready.
+ // |payload_size| is the byte size of the used portion of the buffer.
+ // |key_frame| is true if this delivered frame is a keyframe.
+ virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) = 0;
+
+ // Error notification callback.
+ virtual void NotifyError(Error error) = 0;
+
+ protected:
+ // Clients are not owned by VEA instances and should not be deleted through
+ // these pointers.
+ virtual ~Client() {}
+ };
+
+ // Video encoder functions.
+
+ // Initialize the video encoder with a specific configuration. Called once
+ // per encoder construction.
+ // Parameters:
+ // |input_format| is the frame format of the input stream (as would be
+ // reported by VideoFrame::format() for frames passed to Encode()).
+ // |input_visible_size| is the resolution of the input stream (as would be
+ // reported by VideoFrame::visible_rect().size() for frames passed to
+ // Encode()).
+ // |output_profile| is the codec profile of the encoded output stream.
+ // |initial_bitrate| is the initial bitrate of the encoded output stream,
+ // in bits per second.
+ // TODO(sheu): handle resolution changes. http://crbug.com/249944
+ virtual void Initialize(media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32 initial_bitrate) = 0;
+
+ // Encodes the given frame.
+ // Parameters:
+ // |frame| is the VideoFrame that is to be encoded.
+ // |force_keyframe| forces the encoding of a keyframe for this frame.
+ virtual void Encode(const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) = 0;
+
+ // Send a bitstream buffer to the encoder to be used for storing future
+ // encoded output. Each call here with a given |buffer| will cause the buffer
+ // to be filled once, then returned with BitstreamBufferReady().
+ // Parameters:
+ // |buffer| is the bitstream buffer to use for output.
+ virtual void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) = 0;
+
+ // Request a change to the encoding parameters. This is only a request,
+ // fulfilled on a best-effort basis.
+ // Parameters:
+ // |bitrate| is the requested new bitrate, in bits per second.
+ // |framerate| is the requested new framerate, in frames per second.
+ virtual void RequestEncodingParametersChange(uint32 bitrate,
+ uint32 framerate) = 0;
+
+ // Destroys the encoder: all pending inputs and outputs are dropped
+ // immediately and the component is freed. This call may asynchronously free
+ // system resources, but its client-visible effects are synchronous. After
+ // this method returns no more callbacks will be made on the client. Deletes
+ // |this| unconditionally, so make sure to drop all pointers to it!
+ virtual void Destroy() = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/chromium/media/webm/webm_audio_client.cc b/chromium/media/webm/webm_audio_client.cc
index e52f44b4a9a..1ef640c0dc4 100644
--- a/chromium/media/webm/webm_audio_client.cc
+++ b/chromium/media/webm/webm_audio_client.cc
@@ -26,12 +26,15 @@ void WebMAudioClient::Reset() {
bool WebMAudioClient::InitializeConfig(
const std::string& codec_id, const std::vector<uint8>& codec_private,
- bool is_encrypted, AudioDecoderConfig* config) {
+ int64 seek_preroll, int64 codec_delay, bool is_encrypted,
+ AudioDecoderConfig* config) {
DCHECK(config);
AudioCodec audio_codec = kUnknownAudioCodec;
if (codec_id == "A_VORBIS") {
audio_codec = kCodecVorbis;
+ } else if (codec_id == "A_OPUS") {
+ audio_codec = kCodecOpus;
} else {
MEDIA_LOG(log_cb_) << "Unsupported audio codec_id " << codec_id;
return false;
@@ -63,8 +66,14 @@ bool WebMAudioClient::InitializeConfig(
}
config->Initialize(
- audio_codec, kSampleFormatPlanarF32, channel_layout,
- samples_per_second, extra_data, extra_data_size, is_encrypted, true);
+ audio_codec,
+ (audio_codec == kCodecOpus) ? kSampleFormatS16 : kSampleFormatPlanarF32,
+ channel_layout,
+ samples_per_second, extra_data, extra_data_size, is_encrypted, true,
+ base::TimeDelta::FromMicroseconds(
+ (seek_preroll != -1 ? seek_preroll : 0) / 1000),
+ base::TimeDelta::FromMicroseconds(
+ (codec_delay != -1 ? codec_delay : 0) / 1000));
return config->IsValidConfig();
}
diff --git a/chromium/media/webm/webm_audio_client.h b/chromium/media/webm/webm_audio_client.h
index 1338f5cbd66..7874cec4bea 100644
--- a/chromium/media/webm/webm_audio_client.h
+++ b/chromium/media/webm/webm_audio_client.h
@@ -31,6 +31,8 @@ class WebMAudioClient : public WebMParserClient {
// audio track element fields.
bool InitializeConfig(const std::string& codec_id,
const std::vector<uint8>& codec_private,
+ const int64 seek_preroll,
+ const int64 codec_delay,
bool is_encrypted,
AudioDecoderConfig* config);
diff --git a/chromium/media/webm/webm_cluster_parser.cc b/chromium/media/webm/webm_cluster_parser.cc
index f83a3652f03..87cccae4da1 100644
--- a/chromium/media/webm/webm_cluster_parser.cc
+++ b/chromium/media/webm/webm_cluster_parser.cc
@@ -64,6 +64,7 @@ WebMClusterParser::WebMClusterParser(
block_duration_(-1),
block_add_id_(-1),
block_additional_data_size_(-1),
+ discard_padding_(-1),
cluster_timecode_(-1),
cluster_start_time_(kNoTimestamp()),
cluster_ended_(false),
@@ -137,6 +138,8 @@ WebMParserClient* WebMClusterParser::OnListStart(int id) {
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
} else if (id == kWebMIdBlockAdditions) {
block_add_id_ = -1;
block_additional_data_.reset();
@@ -158,13 +161,16 @@ bool WebMClusterParser::OnListEnd(int id) {
bool result = ParseBlock(false, block_data_.get(), block_data_size_,
block_additional_data_.get(),
- block_additional_data_size_, block_duration_);
+ block_additional_data_size_, block_duration_,
+ discard_padding_set_ ? discard_padding_ : 0);
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
block_add_id_ = -1;
block_additional_data_.reset();
block_additional_data_size_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
return result;
}
@@ -180,6 +186,12 @@ bool WebMClusterParser::OnUInt(int id, int64 val) {
case kWebMIdBlockAddID:
dst = &block_add_id_;
break;
+ case kWebMIdDiscardPadding:
+ if (discard_padding_set_)
+ return false;
+ discard_padding_set_ = true;
+ discard_padding_ = val;
+ return true;
default:
return true;
}
@@ -191,7 +203,8 @@ bool WebMClusterParser::OnUInt(int id, int64 val) {
bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
int size, const uint8* additional,
- int additional_size, int duration) {
+ int additional_size, int duration,
+ int64 discard_padding) {
if (size < 4)
return false;
@@ -214,18 +227,19 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
// Sign extend negative timecode offsets.
if (timecode & 0x8000)
- timecode |= (-1 << 16);
+ timecode |= ~0xffff;
const uint8* frame_data = buf + 4;
int frame_size = size - (frame_data - buf);
return OnBlock(is_simple_block, track_num, timecode, duration, flags,
- frame_data, frame_size, additional, additional_size);
+ frame_data, frame_size, additional, additional_size,
+ discard_padding);
}
bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
switch (id) {
case kWebMIdSimpleBlock:
- return ParseBlock(true, data, size, NULL, -1, -1);
+ return ParseBlock(true, data, size, NULL, -1, -1, 0);
case kWebMIdBlock:
if (block_data_) {
@@ -270,13 +284,16 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
int block_duration,
int flags,
const uint8* data, int size,
- const uint8* additional, int additional_size) {
+ const uint8* additional, int additional_size,
+ int64 discard_padding) {
DCHECK_GE(size, 0);
if (cluster_timecode_ == -1) {
MEDIA_LOG(log_cb_) << "Got a block before cluster timecode.";
return false;
}
+ // TODO(acolwell): Should relative negative timecode offsets be rejected? Or
+ // only when the absolute timecode is negative? See http://crbug.com/271794
if (timecode < 0) {
MEDIA_LOG(log_cb_) << "Got a block with negative timecode offset "
<< timecode;
@@ -348,6 +365,11 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
block_duration * timecode_multiplier_));
}
+ if (discard_padding != 0) {
+ buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
+ discard_padding / 1000));
+ }
+
return track->AddBuffer(buffer);
}
diff --git a/chromium/media/webm/webm_cluster_parser.h b/chromium/media/webm/webm_cluster_parser.h
index e156d47c23b..5aa957cdee6 100644
--- a/chromium/media/webm/webm_cluster_parser.h
+++ b/chromium/media/webm/webm_cluster_parser.h
@@ -110,10 +110,12 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
virtual bool OnBinary(int id, const uint8* data, int size) OVERRIDE;
bool ParseBlock(bool is_simple_block, const uint8* buf, int size,
- const uint8* additional, int additional_size, int duration);
+ const uint8* additional, int additional_size, int duration,
+ int64 discard_padding);
bool OnBlock(bool is_simple_block, int track_num, int timecode, int duration,
int flags, const uint8* data, int size,
- const uint8* additional, int additional_size);
+ const uint8* additional, int additional_size,
+ int64 discard_padding);
// Resets the Track objects associated with each text track.
void ResetTextTracks();
@@ -137,6 +139,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
int64 block_add_id_;
scoped_ptr<uint8[]> block_additional_data_;
int block_additional_data_size_;
+ int64 discard_padding_;
+ bool discard_padding_set_;
int64 cluster_timecode_;
base::TimeDelta cluster_start_time_;
diff --git a/chromium/media/webm/webm_constants.h b/chromium/media/webm/webm_constants.h
index cda45e00d45..3a35dbab377 100644
--- a/chromium/media/webm/webm_constants.h
+++ b/chromium/media/webm/webm_constants.h
@@ -57,6 +57,7 @@ const int kWebMIdChapterTranslateID = 0x69A5;
const int kWebMIdChapterUID = 0x73C4;
const int kWebMIdCluster = 0x1F43B675;
const int kWebMIdCodecDecodeAll = 0xAA;
+const int kWebMIdCodecDelay = 0x56AA;
const int kWebMIdCodecID = 0x86;
const int kWebMIdCodecName = 0x258688;
const int kWebMIdCodecPrivate = 0x63A2;
@@ -91,6 +92,7 @@ const int kWebMIdCueTrack = 0xF7;
const int kWebMIdCueTrackPositions = 0xB7;
const int kWebMIdDateUTC = 0x4461;
const int kWebMIdDefaultDuration = 0x23E383;
+const int kWebMIdDiscardPadding = 0x75A2;
const int kWebMIdDisplayHeight = 0x54BA;
const int kWebMIdDisplayUnit = 0x54B2;
const int kWebMIdDisplayWidth = 0x54B0;
@@ -118,6 +120,7 @@ const int kWebMIdFlagEnabled = 0xB9;
const int kWebMIdFlagForced = 0x55AA;
const int kWebMIdFlagInterlaced = 0x9A;
const int kWebMIdFlagLacing = 0x9C;
+const int kWebMIdFrameRate = 0x2383E3;
const int kWebMIdInfo = 0x1549A966;
const int kWebMIdJoinBlocks = 0xE9;
const int kWebMIdLaceNumber = 0xCC;
@@ -147,6 +150,7 @@ const int kWebMIdSeek = 0x4DBB;
const int kWebMIdSeekHead = 0x114D9B74;
const int kWebMIdSeekID = 0x53AB;
const int kWebMIdSeekPosition = 0x53AC;
+const int kWebMIdSeekPreRoll = 0x56BB;
const int kWebMIdSegment = 0x18538067;
const int kWebMIdSegmentFamily = 0x4444;
const int kWebMIdSegmentFilename = 0x7384;
diff --git a/chromium/media/webm/webm_parser.cc b/chromium/media/webm/webm_parser.cc
index 30e5c1b5e56..f1509abb830 100644
--- a/chromium/media/webm/webm_parser.cc
+++ b/chromium/media/webm/webm_parser.cc
@@ -118,6 +118,7 @@ static const ElementIdInfo kBlockGroupIds[] = {
{UINT, kWebMIdReferencePriority},
{BINARY, kWebMIdReferenceBlock},
{BINARY, kWebMIdCodecState},
+ {UINT, kWebMIdDiscardPadding},
{LIST, kWebMIdSlices},
};
@@ -163,6 +164,8 @@ static const ElementIdInfo kTrackEntryIds[] = {
{UINT, kWebMIdAttachmentLink},
{UINT, kWebMIdCodecDecodeAll},
{UINT, kWebMIdTrackOverlay},
+ {UINT, kWebMIdCodecDelay},
+ {UINT, kWebMIdSeekPreRoll},
{LIST, kWebMIdTrackTranslate},
{LIST, kWebMIdVideo},
{LIST, kWebMIdAudio},
@@ -191,6 +194,7 @@ static const ElementIdInfo kVideoIds[] = {
{UINT, kWebMIdDisplayUnit},
{UINT, kWebMIdAspectRatioType},
{BINARY, kWebMIdColorSpace},
+ {FLOAT, kWebMIdFrameRate},
};
static const ElementIdInfo kAudioIds[] = {
diff --git a/chromium/media/webm/webm_stream_parser.cc b/chromium/media/webm/webm_stream_parser.cc
index 796a1b3b095..12be4492684 100644
--- a/chromium/media/webm/webm_stream_parser.cc
+++ b/chromium/media/webm/webm_stream_parser.cc
@@ -319,11 +319,8 @@ int WebMStreamParser::ParseCluster(const uint8* data, int size) {
}
void WebMStreamParser::FireNeedKey(const std::string& key_id) {
- int key_id_size = key_id.size();
- DCHECK_GT(key_id_size, 0);
- scoped_ptr<uint8[]> key_id_array(new uint8[key_id_size]);
- memcpy(key_id_array.get(), key_id.data(), key_id_size);
- need_key_cb_.Run(kWebMEncryptInitDataType, key_id_array.Pass(), key_id_size);
+ std::vector<uint8> key_id_vector(key_id.begin(), key_id.end());
+ need_key_cb_.Run(kWebMEncryptInitDataType, key_id_vector);
}
} // namespace media
diff --git a/chromium/media/webm/webm_tracks_parser.cc b/chromium/media/webm/webm_tracks_parser.cc
index 67bac044e46..aa28d6feef9 100644
--- a/chromium/media/webm/webm_tracks_parser.cc
+++ b/chromium/media/webm/webm_tracks_parser.cc
@@ -31,6 +31,8 @@ static TextKind CodecIdToTextKind(const std::string& codec_id) {
WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
+ seek_preroll_(-1),
+ codec_delay_(-1),
audio_track_num_(-1),
video_track_num_(-1),
ignore_text_tracks_(ignore_text_tracks),
@@ -161,8 +163,8 @@ bool WebMTracksParser::OnListEnd(int id) {
DCHECK(!audio_decoder_config_.IsValidConfig());
if (!audio_client_.InitializeConfig(
- codec_id_, codec_private_, !audio_encryption_key_id_.empty(),
- &audio_decoder_config_)) {
+ codec_id_, codec_private_, seek_preroll_, codec_delay_,
+ !audio_encryption_key_id_.empty(), &audio_decoder_config_)) {
return false;
}
} else {
@@ -226,6 +228,12 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
case kWebMIdTrackType:
dst = &track_type_;
break;
+ case kWebMIdSeekPreRoll:
+ dst = &seek_preroll_;
+ break;
+ case kWebMIdCodecDelay:
+ dst = &codec_delay_;
+ break;
default:
return true;
}
diff --git a/chromium/media/webm/webm_tracks_parser.h b/chromium/media/webm/webm_tracks_parser.h
index 81588e4b51e..d3993207a14 100644
--- a/chromium/media/webm/webm_tracks_parser.h
+++ b/chromium/media/webm/webm_tracks_parser.h
@@ -83,6 +83,8 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
std::string track_language_;
std::string codec_id_;
std::vector<uint8> codec_private_;
+ int64 seek_preroll_;
+ int64 codec_delay_;
scoped_ptr<WebMContentEncodingsClient> track_content_encodings_client_;
int64 audio_track_num_;